| source | python |
|---|---|
timer_control.py
|
import datetime
import os
import threading
from time import sleep
from devdeck_core.controls.deck_control import DeckControl
class TimerControl(DeckControl):
def __init__(self, key_no, **kwargs):
self.start_time = None
self.end_time = None
self.thread = None
super().__init__(key_no, **kwargs)
def initialize(self):
with self.deck_context() as context:
with context.renderer() as r:
r.image(os.path.join(os.path.dirname(__file__), "../assets/font-awesome", 'stopwatch.png')).end()
def pressed(self):
if self.start_time is None:
self.start_time = datetime.datetime.now()
self.thread = threading.Thread(target=self._update_display)
self.thread.start()
elif self.end_time is None:
self.end_time = datetime.datetime.now()
self.thread.join()
with self.deck_context() as context:
with context.renderer() as r:
r.text(TimerControl.time_diff_to_str(self.end_time - self.start_time))\
.font_size(120)\
.color('red')\
.center_vertically().center_horizontally().end()
else:
self.start_time = None
self.end_time = None
with self.deck_context() as context:
with context.renderer() as r:
r.image(os.path.join(os.path.dirname(__file__), "../assets/font-awesome", 'stopwatch.png')).end()
def _update_display(self):
while self.end_time is None:
if self.start_time is None:
sleep(1)
continue
cutoff = datetime.datetime.now() if self.end_time is None else self.end_time
with self.deck_context() as context:
with context.renderer() as r:
r.text(TimerControl.time_diff_to_str(cutoff - self.start_time)) \
.font_size(120) \
.center_vertically().center_horizontally().end()
sleep(1)
@staticmethod
def time_diff_to_str(diff):
seconds = diff.total_seconds()
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}'
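# Illustrative note (not part of the original control): time_diff_to_str pads
# each field to two digits, so a 1h 2m 5s timedelta formats as
#   TimerControl.time_diff_to_str(datetime.timedelta(seconds=3725)) == '01:02:05'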
|
decorators.py
|
from threading import Thread
import cProfile, pstats, io, os, errno, signal, time
from functools import wraps
from contextlib import contextmanager
from utilmy.debug import log
def test_all():
test_decorators()
test_decorators2()
def test_decorators():
"""
#### python test.py test_decorators
"""
from utilmy.decorators import thread_decorator, timeout_decorator, profiler_context,profiler_decorator, profiler_decorator_base
@thread_decorator
def thread_decorator_test():
log("thread decorator")
@profiler_decorator_base
def profiler_decorator_base_test():
log("profiler decorator")
@timeout_decorator(10)
def timeout_decorator_test():
log("timeout decorator")
profiler_decorator_base_test()
timeout_decorator_test()
thread_decorator_test()
def test_decorators2():
from utilmy.decorators import profiler_decorator, profiler_context
@profiler_decorator
def profiled_sum():
return sum(range(100000))
profiled_sum()
with profiler_context():
x = sum(range(1000000))
print(x)
from utilmy import profiler_start, profiler_stop
profiler_start()
print(sum(range(1000000)))
profiler_stop()
###################################################################################
from utilmy.decorators import timer_decorator
@timer_decorator
def dummy_func():
time.sleep(2)
class DummyClass:
@timer_decorator
def method(self):
time.sleep(3)
dummy_func()
a = DummyClass()
a.method()
########################################################################################################################
########################################################################################################################
def thread_decorator(func):
""" A decorator to run function in background on thread
Return:
background_thread: ``Thread``
"""
@wraps(func)
def wrapper(*args, **kwargs):
background_thread = Thread(target=func, args=args, kwargs=kwargs)
background_thread.daemon = True
background_thread.start()
return background_thread
return wrapper
########################################################################################################################
class _TimeoutError(Exception):
"""Time out error"""
pass
########################################################################################################################
def timeout_decorator(seconds=10, error_message=os.strerror(errno.ETIME)):
"""Decorator to throw timeout error, if function doesnt complete in certain time
Args:
seconds:``int``
No of seconds to wait
error_message:``str``
Error message
"""
def decorator(func):
def _handle_timeout(signum, frame):
raise _TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
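# Illustrative usage sketch (assumption: a Unix-like OS, since signal.SIGALRM is
# not available on Windows). A call that overruns the allowed window raises
# _TimeoutError, which the caller can catch:
#
#   @timeout_decorator(2)
#   def slow():
#       time.sleep(5)
#
#   try:
#       slow()
#   except _TimeoutError as err:
#       print("timed out:", err)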
def timer_decorator(func):
"""
Decorator to show the execution time of a function or a method in a class.
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.perf_counter()
result = func(*args, **kwargs)
end = time.perf_counter()
print(f'function {func.__name__} finished in: {(end - start):.2f} s')
return result
return wrapper
########################################################################################################################
@contextmanager
def profiler_context():
"""
Context manager that profiles the code inside its block
and prints the profiler output.
Example:
with profiler_context():
# code to profile here
"""
from pyinstrument import Profiler
profiler = Profiler()
profiler.start()
try:
yield profiler
except Exception as e:
raise e
finally:
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
def profiler_decorator(func):
"""
A decorator that profiles a function
and prints the profiler output.
"""
@wraps(func)
def wrapper(*args, **kwargs):
from pyinstrument import Profiler
profiler = Profiler()
profiler.start()
result = func(*args, **kwargs)
profiler.stop()
print(profiler.output_text(unicode=True, color=True))
return result
return wrapper
def profiler_decorator_base(fnc):
"""
A decorator that uses cProfile to profile a function
and print the result.
"""
@wraps(fnc)
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
def test0():
with profiler_context():
x = sum(range(1000000))
print(x)
from utilmy import profiler_start, profiler_stop
profiler_start()
print(sum(range(1000000)))
profiler_stop()
@thread_decorator
def thread_decorator_test():
log("thread decorator")
@profiler_decorator_base
def profiler_decorator_base_test():
log("profiler decorator")
@timeout_decorator(10)
def timeout_decorator_test():
log("timeout decorator")
@profiler_decorator
def profiled_sum():
return sum(range(100000))
@timer_decorator
def dummy_func():
time.sleep(2)
|
run.3-ex-out-parallel.py
|
"""
file: run.3-ex-out-parallel.py
As able, get the latest image add to queue, then repeat
As able, get the latest queued and process, then repeat
Usage:
```sh
# detected humans are printed to stdout
docker run --entrypoint="/usr/bin/python3" --volume="$(pwd)/out:/out" -it care-tpe-scripts:latest \
run.3-ex-out-parallel.py --model=mobilenet_thin --resize=432x368 \
--image-url="http://192.168.1.132:55627/camera.jpg"
```
"""
import argparse
import logging
import sys
import time
import urllib.request
import requests
import cv2
from PIL import Image
import io
from tf_pose import common
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
# multithreading
import threading
from queue import Queue
queue_lock = threading.Lock()
t0 = time.time()
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
class QueueProcessor(object):
def __init__(self):
self.start_image_queue = Queue(maxsize=8) # max size should really be just the number of image processor threads
self.image_queue = Queue(maxsize=8)
self.human_queue = Queue(maxsize=12)
self.print_lock = threading.Lock()
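# Pipeline sketch (descriptive comment, not in the original script):
#   start_image_queue - process_image_queue puts a timestamp to signal "ready
#                       for another frame"; add_images_to_queue blocks on get()
#                       before downloading the next image.
#   image_queue       - carries (image_url, image) from the downloader thread
#                       to the pose-estimation threads.
#   human_queue       - carries (image, humans) from the estimators to the main
#                       loop, which prints them or POSTs the drawn image.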
def tprint(self, arg):
with self.print_lock:
print(arg)
def tinfo(self, arg):
with self.print_lock:
logger.info(arg)
def info(self, arg):
logger.info(arg)
def add_images_to_queue(self):
while True:
#! start_time = time.time()
# maybe block until ready to download new images
#! self.tinfo('add_images_to_queue: (waiting ) self.start_image_queue.get()')
_req_time = self.start_image_queue.get()
#! self.tinfo('add_images_to_queue: (waited %.4fs) continuing from self.start_image_queue.get()' % (time.time() - start_time))
# download latest image
#! t_dl = time.time()
try:
resp = urllib.request.urlopen(image_url)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.tinfo('add_images_to_queue: (error ) downloading image failed')
continue
#! elapsed_dl = time.time() - t_dl
#! self.tinfo('add_images_to_queue: (downlo %.4fs) downloaded image %s' % (elapsed_dl, args.image_url))
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
self.start_image_queue.task_done()
if image is None:
with self.print_lock:
logger.error('Image cannot be read from "%s"' % image_url)
else:
# add identifying info to item
#! self.tinfo('add_images_to_queue: (waiting ) self.image_queue.put((image_url, image))')
#! t_put = time.time()
self.image_queue.put((image_url, image))
#! with self.print_lock:
#! self.info('add_images_to_queue: (waited %.4fs) continuing from self.image_queue.put((image_url, image))' % (time.time() - t_put))
#! self.info('add_images_to_queue: (loop %.4fs) completed loop' % (time.time() - start_time))
def process_image_queue(self):
w, h = model_wh(args.resize)
if w == 0 or h == 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
while True:
#! start_time = time.time()
# trigger ready to process another image
#! self.tinfo('process_image_queue: (waiting ) self.start_image_queue.put(time.time())')
self.start_image_queue.put(time.time(), block=False)
#! self.tinfo('process_image_queue: (waited %.4fs) continuing from self.start_image_queue.put(time.time())' % (time.time() - start_time))
#! get_time = time.time()
#! self.tinfo('process_image_queue: (waiting ) self.image_queue.get()')
(_image_url, current_image) = self.image_queue.get()
#! self.tinfo('process_image_queue: (waited %.4fs) continuing from self.image_queue.get()' % (time.time() - get_time))
humans = e.inference(current_image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
self.image_queue.task_done()
self.human_queue.put((current_image, humans), block=False)
#! with self.print_lock:
#! # once completed, trigger getting new images
#! self.info('process_image_queue: (loop %.4fs) completed loop' % (time.time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation run')
parser.add_argument('--image-url', type=str, default='')
parser.add_argument('--post-url', type=str, default='')
parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. default=0x0. Recommended: 432x368, 656x368, or 1312x736')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=4.0')
args = parser.parse_args()
image_url = args.image_url
post_url = args.post_url
logger.info('Tracking image_url: %s' % image_url)
elapsed = time.time() - t0
logger.info('initialized imports and args in %.4f seconds.' % elapsed)
qp = QueueProcessor()
try:
# start listening for ability to add images to process queue
t = threading.Thread(target=qp.add_images_to_queue)
t.daemon = True
t.start()
# start downloading the first image.
# also should ensure that the image getter is always one image ahead of the processor
qp.start_image_queue.put(time.time())
# the processors will be ready
for i in range(2):
t = threading.Thread(target=qp.process_image_queue)
t.daemon = True
t.start()
print(threading.enumerate())
# human tracker
t0 = time.time()
c0 = 0
while True:
(image, humans) = qp.human_queue.get()
with queue_lock:
# output humans
if post_url:
#! t_print = time.time()
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
with io.BytesIO() as output:
Image.fromarray(image).save(output, 'jpeg')
requests.post(post_url, data=output.getvalue())
#! elapsed_print = time.time() - t_print
#! logger.info('posted humans image in %.4f seconds' % (elapsed_print))
else:
print(humans)
c0 += 1
qp.human_queue.task_done()
except (KeyboardInterrupt, SystemExit):
elapsed = time.time() - t0
print()
print('total frames %d' % c0)
print('total elapsed %.4fs' % elapsed)
print('avg fps %.4f' % (c0 / elapsed))
print('avg sec %.4fs' % (elapsed / c0))
|
Downloader.py
|
# -*- coding: utf-8 -*-
__author__ = 'Mayank Gupta'
__version__ = '1.1'
__license__ = 'License :: MIT License'
from typing import Union, Callable
from typing import List, Tuple
from typing import Dict, Type
from typing import Any, Optional
import socket,select,re,ssl,threading,sys,os
from urllib.parse import urlparse, unquote
from time import sleep, time
import tempfile, os, logging
from select import select
from random import randint
logg = logging.getLogger(__name__)
class Download(object):
'''
This :class:`Download <Download>` will download streams with multi-connections.
:param str url:
Pass the download link
:param str name:
(optional) Pass the output file name.
:param str dire:
(optional) Pass the output directory (excluding the filename).
:param bool status:
(optional) Pass if you want to enable process bar. **Default[False]**
:param int connection:
(optional) Number of connections to create. **Default[8]**
:param int chunk:
(optional) Pass the chunk/buffer size for accepting packets. **Default[5120]**
:rtype: str
:returns: the file name
'''
def __init__(self,url:str,dire:str="",name:str="",status:bool=False,connection:int=8, chunk:int = 5120) -> None:
self.name = name.replace(" ","_")
self.dire = dire
if self.dire:
self.dire = self.create_user_dir(dire)
if self.dire[-1] == "/":
self.dire = self.dire[:-1]
self.status = status
self.chunk = chunk
self.connection = connection
self.url = unquote(url)
def start(self) -> str:
'''
Start will fire up the downloading
'''
protocol, url, host =self.RawData(self.url)
logg.debug(f'protocol: {protocol}, host: {host}')
check = self.check_multi(protocol, self.url, host)
logg.debug(f'Download check status: {check}')
if check[0] == "0":
# Data for progress bar
# gg = bytes downloaded, size = total size of file,
# when is a bool value to start and stop the progress bar
self.size = int(self.header["content-length"])
self.gg = 0 # download bytes
self.when = True
#get filename
name = self.getfilename()
logg.debug(f'Filename: {name}, Filesize: {self.size}')
# Create ranges for downloading chunks in parts
ranges = self.get_range(int(self.header["content-length"]),self.connection)
self.files = {}
threads = []
for n,m in enumerate(ranges):
req=self.gen_req(host,url,{"range":f"bytes={m}"})
threads.append(threading.Thread(target=self.down, args=(protocol, host, req, m, str(n))))
# break
if self.status:threading.Thread(target=self.run).start()
for n in threads:n.start()
for n in threads:n.join()
# End of progress bar
self.when = False
with open(name,"wb") as f:
for n in range(len(self.files)):
ff=self.files[n]
ff.seek(0)
f.write(ff.read())
ff.close()
f.close()
# end of progress bar with 100%
p=int(int(self.gg)*50/int(self.size))
if self.status:print("Process: [{}] {}% Complete {:<10}".format("█"*p+"-"*(50-p), p*100/50,"0.0 Kb/s"))
logg.debug(f"Downloading conpleted 100% Filename{name}")
# print(name)
return name
elif check[0] == "1" :
name = self.getfilename()
req=self.gen_req(host,url)
sock=self.connect(protocol,host)
sock.sendall(req)
data=sock.recv(self.chunk)
header,image=self.hparsec(data)
f = open(name,"wb")
f.write(image)
# gg = bytes downloaded, size = total size of file,
# when is a bool value to start and stop the progress bar
self.gg = len(image)
self.size = int(header["content-length"])
self.when = True
# Start the progress bar if status is True
if self.status:threading.Thread(target=self.run).start()
logg.debug(f'Filename: {name}, Filesize: {self.size}')
while True:
try:
data = sock.recv(self.chunk)
if not data:break
f.write(data)
self.gg += len(data)
except socket.timeout:
break
# End of progress bar
self.when = False
# end of progress bar with 100%
p=int(int(self.gg)*50/int(self.size))
if self.status:print("Process: [{}] {}% Complete {:<10}".format("█"*p+"-"*(50-p), p*100/50,"0.0 Kb/s"))
# Return the file name
return name
elif check[0] == "2" :
name = self.getfilename()
req=self.gen_req(host,url)
sock=self.connect(protocol,host)
sock.sendall(req)
data=sock.recv(self.chunk)
header,image=self.hparsec(data)
f = open(name,"wb")
f.write(image)
if self.status:
logg.debug("We can't run status bar for this, No content-length found")
logg.debug(f'Filename: {name}, Filesize: Unknown')
while True:
try:
data = sock.recv(self.chunk)
if not data:break
f.write(data)
except socket.timeout:
break
# Return the file name
return name
else:
return check[1]
def create_user_dir(self,foldername:str) -> str:
if not os.path.exists(foldername):
os.makedirs(foldername)
return foldername
def rangediff(self,s):
c,b = s.split("-")
c,b = int(c),int(b)
if self.size == b:
diff = b-c
return diff
else:
diff = b-c
return diff+1
def down(self, protocol:str, host:str, req:bytes, range:str, id:str="") -> None:
f = tempfile.TemporaryFile()
if id != "":self.files[int(id)] = f
sock=self.connect(protocol,host)
diff = self.rangediff(range)
sock.settimeout(15)
sock.sendall(req)
data=sock.recv(self.chunk)
header,image=self.hparsec(data)
self.gg += len(image)
local_gg = 0
local_gg += len(image)
f.write(image)
while True:
try:
data = sock.recv(self.chunk)
if not data:break
f.write(data)
self.gg += len(data)
local_gg += len(data)
if local_gg >= diff:
break
except socket.timeout:
break
f.seek(0)
def run(self):
self.temp1=0
while self.when:
speed=(self.gg-self.temp1)/1024
p=int(int(self.gg)*50/int(self.size))
print("Process: [{}] {}% Complete {:<8}Kb/s".format("█"*p+"-"*(50-p), p*100/50,"{:.2f}".format(speed)),end="\r")
self.temp1=self.gg
sleep(1)
def get_range(self, length:int, conn:int) -> List[str]:
av = int(length/conn)
r=[]
start = 0
r.append(f'{start}-{start+av}')
start+=av
if conn>1:
for n in range(conn-2):
r.append(f'{start+1}-{start+av}')
start+=av
r.append(f'{start+1}-{length}')
return r
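# Illustrative note (not part of the original class): for a 100-byte file split
# across 4 connections, get_range(100, 4) returns
#   ['0-25', '26-50', '51-75', '76-100']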
def getfilename(self) -> str:
finalname = ""
name = ""
if self.dire:
if not self.name:
if self.tmpname:
finalname = f'{self.dire}/{randint(10,99)}{self.tmpname}'
else:
dd=self.header["content-type"].split("/")[1].split("+")[0]
finalname = f'{self.dire}/{randint(10,99)}{int(time())}.{dd}'
else:finalname = f'{self.dire}/{randint(10,99)}{self.name}'
else:
if not self.name:
if self.tmpname:
finalname = f'{randint(10,99)}{self.tmpname}'
else:
dd=self.header["content-type"].split("/")[1].split("+")[0]
finalname = f'{randint(10,99)}{int(time())}.{dd}'
else:finalname = f'{randint(10,99)}{self.name}'
for n in finalname:
if n not in '\\ /:*?"<>|':
name+=n
return name
def check_multi(self, protocol:str, url:str, host:str) -> Tuple:
req=self.gen_req(host,url)
sock=self.connect(protocol,host)
sock.sendall(req)
data=sock.recv(self.chunk)
self.header,image=self.hparsec(data)
if "content-length" in self.header.keys():
if int(self.header["status"]) != 200:
try:
sock.close()
name = self._Download(self.header["location"], dire=self.dire, name=self.name, status=self.status, chunk=self.chunk, connection=self.connection)
return "2",name
except Exception as err:
print(f"Error: {self.header['status']}")
print("We cant download from this URL Contact Admin with URL OR can't save with this file name")
sock.close()
sys.exit(1)
else: return "2",""
if "accept-ranges" in self.header.keys():
return "0",""
return "1",""
@classmethod
def _Download(cls,*args,**kwargs):
return cls(*args,**kwargs).start()
def connect(self, protocol:str, host:str) -> socket.socket:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if protocol=="https":
s.connect((host, 443))
s = ssl.create_default_context().wrap_socket(s, server_hostname=host)
elif protocol=="http":
s.connect((host, 80))
else:
print("we only support HTTP and HTTPS")
s.close()
sys.exit(1)
return s
def hparsec(self,data:bytes) -> Tuple[Dict[str,str], bytes]:
header = data.split(b'\r\n\r\n')[0]
store = data[len(header)+4:]
html = data[len(header)+4:]
header=header.decode().split("\r\n")
out={}
for n in header[1:]:
temp=n.split(":")
value=""
for n in temp[1:]:
value+=n+":"
out[temp[0].lower()]=value[1:len(value)-1]
out["status"]=header[0].split()[1]
return out,store
def gen_req(self, host:str, url:str, header:Dict[str,str] = {}) -> bytes:
req=f'GET {url} HTTP/1.1\r\nhost: {host}\r\nuser-agent: MayankFawkes/bot\r\nconnection: close\r\n'
for n, m in header.items():
req += f'{n}:{m}\r\n'
req+="\r\n"
return req.encode()
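# Illustrative note (hypothetical host and path): gen_req("example.com",
# "/file.bin", {"range": "bytes=0-25"}) produces the raw request
#   GET /file.bin HTTP/1.1\r\n
#   host: example.com\r\n
#   user-agent: MayankFawkes/bot\r\n
#   connection: close\r\n
#   range:bytes=0-25\r\n
#   \r\n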
def RawData(self,web_url:str)-> Tuple[str, str, str]:
o=urlparse(web_url)
host=o.netloc
protocol=o.scheme
if o.query:
url=(o.path+"?"+o.query)
self.tmpname = ""
else:
url=o.path
self.tmpname = o.path.split("/")[-1]
return protocol, url, host
if __name__ == '__main__':
# link=input("Enter Url -->")
# link='https://storge.pic2.me/download/origin/257714.jpeg'
link="https://r1---sn-cnoa-cive.googlevideo.com/videoplayback?expire=1620579261&ei=Xb-XYI2VJ4ym1Aans7DwDw&ip=117.223.82.146&id=o-AFqRgtQS4OMPnxit7AMjqcXyPM0UswajM276SLhvP7uE&itag=250&source=youtube&requiressl=yes&mh=JP&mm=31%2C29&mn=sn-cnoa-cive%2Csn-cvh76nes&ms=au%2Crdu&mv=m&mvi=1&pl=22&initcwndbps=483750&vprv=1&mime=audio%2Fwebm&ns=jrlm7LOWoA-OCE--lu9Tjq0F&gir=yes&clen=186213948&dur=23997.321&lmt=1585553573314543&mt=1620557323&fvip=4&keepalive=yes&fexp=24001373%2C24007246&c=WEB&txp=5431432&n=uqYJ9ztW1v5Ju_v9uUe&sparams=expire%2Cei%2Cip%2Cid%2Citag%2Csource%2Crequiressl%2Cvprv%2Cmime%2Cns%2Cgir%2Cclen%2Cdur%2Clmt&sig=AOq0QJ8wRgIhAL1gDvY-1ZZmUvmAXDk5GYdwy6lvOrftJbGzpdMnXPEZAiEA-OHHfu0nrp8SdwTjhLnFeYdrepH2BC8Boga1Ja9sHm4%3D&lsparams=mh%2Cmm%2Cmn%2Cms%2Cmv%2Cmvi%2Cpl%2Cinitcwndbps&lsig=AG3C_xAwRAIgM75vfG2SlIWKjujB-J6KSmYXqaxxzCnF0OBWic0y-LgCIBARz3ayIG1Pir0GGVYl4DEVYIodRlF74nPOoCqxN3NB"
# link="http://www.macaronisoup.com/songs/mp3/LoobyLoo.mp3"
# link = "https://portswigger.net/burp/releases/download?product=community&version=2020.11.2&type=WindowsX64"
dd=Download(link ,name = "test.webm", status = True, connection = 8, chunk = 5120).start()
print(dd)
|
onecard.py
|
import LED_display as LED
import threading
import time
import random
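# Descriptive note (inferred from the patterns below, not in the original file):
# each card[n] is an 8x16 frame for the LED matrix, 1 = LED on, drawn as a
# border plus n+1 pips like the faces of a die. The loop at the bottom copies
# the five-pip and one-pip frames into LED.screen, alternating once per second.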
card=[[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,1,1,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,1,1,0,0,0,0,0,1],
[1,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1],
[1,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]],
[[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,0,0,1,1,0,0,0,0,1,1,0,0,0,1],
[1,0,0,0,1,1,0,0,0,0,1,1,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]]
def LED_init():
thread=threading.Thread(target=LED.main, args=())
thread.daemon = True
thread.start()
return
LED_init()
while(1):
for i in range(8):
for j in range(16):
LED.screen[i][j]=card[4][i][j]
time.sleep(1)
for i in range(8):
for j in range(16):
LED.screen[i][j]=card[0][i][j]
time.sleep(1)
|
parallel.py
|
import sys
import time
import traceback
import threading
import utilities.common as utils
# change this to 1 to force single entry thread calls
min_items = 2
shutting_down = False
def set_shutting_down():
global shutting_down
shutting_down = True
in_parallel = 0
def set_in_parallel(val):
global in_parallel
if val:
in_parallel = in_parallel + 1
else:
in_parallel = in_parallel - 1
def get_in_parallel():
return in_parallel
def wait_for_threads(threads):
while True:
alive = False
for index, thread in enumerate(threads):
thread.join(timeout=1)
if thread.is_alive():
alive=True
if not alive or shutting_down:
break
def exec_foreach (use_threads, items, func, *args, **kwargs):
set_in_parallel(True)
retvals = list()
exceptions = list()
def _thread_func(index, *args, **kwargs):
try:
retvals[index] = func(*args, **kwargs)
exceptions[index] = None
except Exception as e1:
retvals[index] = None
exceptions[index] = traceback.format_exc()
except SystemExit as e2:
retvals[index] = None
exceptions[index] = e2
threads = list()
args_list = list(args)
args_list.insert(0, "")
args_list.insert(0, retvals)
index = 0
for item in items:
retvals.append(None)
exceptions.append(None)
args_list[0] = index
index = index + 1
args_list[1] = item
args = tuple(args_list)
if not use_threads or len(items) < min_items:
_thread_func(*args, **kwargs)
else:
x = threading.Thread(target=_thread_func, args=args, kwargs=kwargs)
threads.append(x)
x.start()
wait_for_threads(threads)
set_in_parallel(False)
for exp in exceptions:
if isinstance(exp, SystemExit):
sys.exit()
return [retvals, exceptions]
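# Illustrative usage sketch (hypothetical worker function): extra positional
# arguments after func are passed to every call, with the item itself as the
# first argument, so the call below runs ping("10.0.0.1", 3) and
# ping("10.0.0.2", 3) on separate threads and returns [retvals, exceptions].
#
#   def ping(host, count):
#       return "%s ok after %d probes" % (host, count)
#
#   retvals, exceptions = exec_foreach(True, ["10.0.0.1", "10.0.0.2"], ping, 3)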
# remove this once refactored
class ExecAllFunc(utils.ExecAllFunc):
pass
# rename this once refactored
class ExecAllFunc_todo_rename(object):
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def exec_all(use_threads, entries, first_on_main=False):
set_in_parallel(True)
retvals = list()
exceptions = list()
def _thread_func(func, index, *args, **kwargs):
try:
retvals[index] = func(*args, **kwargs)
exceptions[index] = None
except Exception as e1:
retvals[index] = None
exceptions[index] = traceback.format_exc()
except SystemExit as e2:
retvals[index] = None
exceptions[index] = e2
f_args = None
f_kwargs = {}
threads = list()
index = 0
for entry in entries:
if isinstance(entry, utils.ExecAllFunc):
kwargs = entry.kwargs
entry2 = [entry.func]
entry2.extend(list(entry.args))
else:
kwargs = {}
entry2 = entry
entry2.insert(1, index)
index = index + 1
args = tuple(entry2)
retvals.append(None)
exceptions.append(None)
if not f_args and first_on_main:
f_args = args
f_kwargs = kwargs
elif not use_threads or len(entries) < min_items:
_thread_func(*args, **kwargs)
else:
x = threading.Thread(target=_thread_func, args=args, kwargs=kwargs)
threads.append(x)
x.start()
if first_on_main:
_thread_func(*f_args, **f_kwargs)
wait_for_threads(threads)
set_in_parallel(False)
for exp in exceptions:
if isinstance(exp, SystemExit):
sys.exit()
return [retvals, exceptions]
def exec_parallel(use_threads, items, func, kwarg_list,*args):
"""
Author:sooria.gajendrababu@broadcom.com
Info: parallel execution function for APIs with only kwargs
:param args:
:return:
Usage:
dict1 ={"local_asn":dut1_as,'neighbor_ip':enable_bfd_list_1,'config':'yes'}
dict2 ={"local_asn":dut3_as,'neighbor_ip':enable_bfd_list_2,'config':'yes'}
exec_parallel(True,[dut1,dut3],bfd.configure_bfd,[dict1,dict2])
"""
set_in_parallel(True)
retvals = list()
exceptions = list()
def _thread_func(index, *args, **kwargs):
try:
retvals[index] = func(*args, **kwargs)
exceptions[index] = None
except Exception as e1:
retvals[index] = None
exceptions[index] = traceback.format_exc()
except SystemExit as e2:
retvals[index] = None
exceptions[index] = e2
threads = list()
args_list = list(args)
args_list.insert(0, "")
args_list.insert(0, retvals)
index = 0
for item,kwargs in zip(items,kwarg_list):
retvals.append(None)
exceptions.append(None)
args_list[0] = index
index = index + 1
args_list[1] = item
args = tuple(args_list)
if not use_threads or len(items) < min_items:
_thread_func(*args, **kwargs)
else:
x = threading.Thread(target=_thread_func, args=args, kwargs=kwargs)
threads.append(x)
x.start()
wait_for_threads(threads)
set_in_parallel(False)
for exp in exceptions:
if isinstance(exp, SystemExit):
sys.exit()
return [retvals, exceptions]
class ExecuteBackgroud(object):
def __init__(self):
self.finished = False
self.func = None
self.args = ()
self.kwargs = {}
self.event = threading.Event()
self.event.clear()
self.t = threading.Thread(target=self._thread_func)
def start(self, func, *args, **kwargs):
self.finished = False
self.func = func
self.args = args
self.kwargs = kwargs
self.t.start()
def run(self):
self.event.set()
def stop(self):
self.finished = True
self.event.set()
time.sleep(1)
def is_valid(self):
return bool(self.func)
def _thread_func(self):
try:
while True:
self.event.wait()
if self.finished:
return
if self.func:
self.func(*self.args, **self.kwargs)
self.event.clear()
except Exception as e1:
print(e1)
except SystemExit as e2:
print(e2)
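# Illustrative usage sketch (hypothetical poller function): start() arms the
# worker thread with a callable, run() triggers one execution of it, and stop()
# asks the thread to exit.
#
#   bg = ExecuteBackgroud()
#   bg.start(poll_counters, "eth0")
#   bg.run()
#   bg.stop()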
def ensure_no_exception(values):
"""
st is imported inside the function because this file is imported by the
framework, so we cannot import framework APIs at module level.
:param values:
:return:
"""
from spytest import st
for exp in values:
if exp is not None:
st.report_fail("exception_observed", exp)
return True
|
cobble.py
|
# Wrapper around Cobble native DLL
# TODO: Test on Windows (may need to use stdcall)
from ctypes import *
from enum import Enum, unique
import platform
import sys
import os
from queue import Queue, Empty
import signal
# Required for runloop
if platform.system() == 'Darwin':
from PyObjCTools import AppHelper
from threading import Thread
plugin_name = {
'Darwin': 'cobble_mac.dylib',
'Windows': 'Release/Windows/x64/Cobble.dll',
'Linux': 'cobble.so'
}
if platform.system() not in plugin_name.keys():
print("Platform {} does not have a corresponding Cobble library!")
sys.exit(-1)
plugin = cdll.LoadLibrary(os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../../../src/build/" + plugin_name[platform.system()]))
c_float_p = POINTER(c_float)
c_byte_p = POINTER(c_byte)
plugin.cobble_init.restype = None
plugin.cobble_deinit.restype = None
plugin.cobble_scan_start.restype = None
#plugin.cobble_scan_stop.restype = None
plugin.register_scanresult_cb.restype = None
plugin.cobble_connect.restype = None
plugin.cobble_connect.argtypes = [c_char_p]
plugin.cobble_subscribe.restype = None
plugin.cobble_subscribe.argtypes = [c_char_p]
plugin.cobble_write.restype = None
plugin.cobble_write.argtypes = [c_char_p, c_char_p, c_int]
# Windows only
plugin.cobble_queue_process.restype = None
#typedef void (*scanresult_funcptr)(const char*, int, const char*);
scanresults = Queue()
updatevalues = Queue()
characteristics = []
# Scan results from the library are sent via this callback
# For simplicity of use, we simply add to a queue
# Note that this means that results can be stale.
@CFUNCTYPE(None, c_char_p, c_int, c_char_p)
def scanresult_cb(name, rssi, identifier):
if name != None:
name = str(name, 'utf-8')
else:
name = "(none)"
identifier = str(identifier, "utf-8")
#print(f"Python received a scan result with name {name}, rssi {rssi}, identifier {identifier}")
scanresults.put((name, rssi, identifier))
plugin.register_scanresult_cb(scanresult_cb)
# Discovered characteristics are sent by the library via this callback
# For simplicity of use, we simply add to a list
# Note that this means that results can be stale.
@CFUNCTYPE(None, c_char_p, c_char_p)
def characteristicdiscovered_cb(service_uuid, characteristic_uuid):
service_uuid = str(service_uuid, 'utf-8')
characteristic_uuid = str(characteristic_uuid, 'utf-8')
print(f"Characteristic discovered: {service_uuid}, {characteristic_uuid}")
characteristics.append((service_uuid, characteristic_uuid,))
plugin.register_characteristicdiscovered_cb(characteristicdiscovered_cb)
# Characteristic value update notifications are sent by the library via this callback
@CFUNCTYPE(None, c_char_p, POINTER(c_char), c_int)
def updatevalue_cb(characteristic_uuid, data, length):
characteristic_uuid = str(characteristic_uuid, 'utf-8')
buf = bytes(b''.join([data[i] for i in range(length)]))
# print(f"Data received on {characteristic_uuid} is size {len(buf)}, value is " + repr(buf))
updatevalues.put((characteristic_uuid, buf))
plugin.register_updatevalue_cb(updatevalue_cb)
def init():
print("Cobble init")
plugin.cobble_init()
# TODO: Await cobble_status giving Initialised or Error...
pass
def start_scan():
print("Cobble start scan")
plugin.cobble_scan_start()
pass
def connect(name):
plugin.cobble_connect(name.encode('utf-8'))
pass
def get_scanresult():
try:
return scanresults.get(block=False)
except Empty:
return None
def get_updatevalue():
try:
return updatevalues.get(block=False)
except Empty:
return None
def subscribe(characteristic_uuid):
plugin.cobble_subscribe(characteristic_uuid.encode('utf-8'))
def write(characteristic_uuid, data):
assert isinstance(data, (bytearray, bytes))
data_converted = (c_char * len(data))(*data)
plugin.cobble_write(characteristic_uuid.encode('utf-8'), data_converted, len(data))
pass
def run_with(main_func):
print("Running main")
t = Thread(target=main_func)
t.daemon = True
t.start()
print("Running event loop on main thread...")
if platform.system() == 'Darwin':
try:
AppHelper.runConsoleEventLoop(installInterrupt=True)
except KeyboardInterrupt:
AppHelper.stopEventLoop()
pass
else:
try:
while(t.is_alive()):
plugin.cobble_queue_process()
except KeyboardInterrupt:
pass
pass
print("Cobble completed.")
os.kill(os.getpid(), signal.SIGINT)
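# Illustrative sketch (hypothetical device name "MyDevice"): a minimal main
# function for run_with, which keeps the OS event loop on the main thread while
# this scan-and-connect loop runs on a daemon thread.
def _example_main():
    from time import sleep  # local import keeps the sketch self-contained
    init()
    start_scan()
    while True:
        result = get_scanresult()
        if result is not None and result[0] == "MyDevice":
            connect(result[0])
            break
        sleep(0.1)
# run_with(_example_main)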
|
plugtest.py
|
# -*- coding: utf-8 -*-
from Queue import Queue
import random
import socket
import threading
import unittest
from coapthon.messages.message import Message
from coapclient import HelperClient
from coapthon.messages.response import Response
from coapthon.messages.request import Request
from coapthon import defines
from coapthon.serializer import Serializer
from plugtest_coapserver import CoAPServerPlugTest
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
def setUp(self):
self.server_address = ("127.0.0.1", 5683)
self.current_mid = random.randint(1, 1000)
self.server_mid = random.randint(1000, 2000)
self.server = CoAPServerPlugTest("127.0.0.1", 5683, starting_mid=self.server_mid)
self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
self.server_thread.start()
self.queue = Queue()
def tearDown(self):
self.server.close()
self.server_thread.join(timeout=25)
self.server = None
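# Descriptive note (comment added for clarity, not in the original tests):
# _test_with_client drives exchanges through HelperClient,
# _test_with_client_observe does the same for observe relationships (cancelling
# with an RST at the end), and _test_plugtest serializes requests and sends
# them over a raw UDP socket, checking each received message against the
# expected Response fields.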
def _test_with_client(self, message_list): # pragma: no cover
client = HelperClient(self.server_address)
for message, expected in message_list:
if message is not None:
received_message = client.send_request(message)
if expected is not None:
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
client.stop()
def client_callback(self, response):
print "Callback"
self.queue.put(response)
def _test_with_client_observe(self, message_list, callback): # pragma: no cover
client = HelperClient(self.server_address)
token = None
last_mid = 0
for message, expected in message_list:
if message is not None:
token = message.token
client.send_request(message, callback)
received_message = self.queue.get()
if expected is not None:
last_mid = expected.mid
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, self.server_address)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
message = Message()
message.type = defines.Types["RST"]
message.token = token
message._mid = last_mid
message.destination = self.server_address
client.send_empty(message)
client.stop()
def _test_plugtest(self, message_list): # pragma: no cover
serializer = Serializer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for message, expected in message_list:
if message is not None:
datagram = serializer.serialize(message)
sock.sendto(datagram, message.destination)
if expected is not None:
datagram, source = sock.recvfrom(4096)
received_message = serializer.deserialize(datagram, source)
if expected.type is not None:
self.assertEqual(received_message.type, expected.type)
if expected.mid is not None:
self.assertEqual(received_message.mid, expected.mid)
self.assertEqual(received_message.code, expected.code)
if expected.source is not None:
self.assertEqual(received_message.source, source)
if expected.token is not None:
self.assertEqual(received_message.token, expected.token)
if expected.payload is not None:
self.assertEqual(received_message.payload, expected.payload)
if expected.options is not None:
self.assertEqual(received_message.options, expected.options)
for o in expected.options:
option_value = getattr(expected, o.name.lower().replace("-", "_"))
option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
self.assertEqual(option_value, option_value_rec)
sock.close()
def test_td_coap_link_01(self):
print "TD_COAP_LINK_01"
path = "/.well-known/core"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = """</separate>;ct=0;if="separate",</large-update>;</seg1/seg2/seg3>;rt="Type1";sz="13",</large>;</seg1/seg2>;rt="Type1";sz="13",</test>;rt="Type1";sz="13",</obs>;</long>;</seg1>;rt="Type1";sz="13",</query>;rt="Type1";sz="13","""
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_link_02(self):
print "TD_COAP_LINK_02"
path = "/.well-known/core"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.uri_query = "rt=Type1"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = """</seg1/seg2/seg3>;rt="Type1";sz="13",</seg1/seg2>;rt="Type1";sz="13",</test>;rt="Type1";sz="13",</seg1>;rt="Type1";sz="13",</query>;rt="Type1";sz="13","""
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_01(self):
print "TD_COAP_CORE_01"
path = "/test"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Test Resource"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_02(self):
print "TD_COAP_CORE_02"
path = "/test_post"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req.content_type = defines.Content_types["application/xml"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "/test_post"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_03(self):
print "TD_COAP_CORE_03"
path = "/test"
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req.content_type = defines.Content_types["application/xml"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
self.current_mid += 1
exchange1 = (req, expected)
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Test Resource"
self.current_mid += 1
exchange2 = (req, expected)
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.accept = defines.Content_types["application/xml"]
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "<value>test</value>"
expected.content_type = defines.Content_types["application/xml"]
self.current_mid += 1
exchange3 = (req, expected)
self._test_with_client([exchange1, exchange2, exchange3])
def test_td_coap_core_04(self):
print "TD_COAP_CORE_04"
path = "/test"
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.DELETED.number
expected.token = None
expected.payload = None
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_05(self):
print "TD_COAP_CORE_05"
path = "/test"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["NON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["NON"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Test Resource"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_06(self):
print "TD_COAP_CORE_06"
path = "/test_post"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["NON"]
req.content_type = defines.Content_types["application/xml"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
expected = Response()
expected.type = defines.Types["NON"]
expected._mid = None
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.payload = None
expected.location_path = "/test_post"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_07(self):
print "TD_COAP_CORE_07"
path = "/test"
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["NON"]
req.content_type = defines.Content_types["application/xml"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
expected = Response()
expected.type = defines.Types["NON"]
expected._mid = None
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_08(self):
print "TD_COAP_CORE_08"
path = "/test"
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["NON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["NON"]
expected._mid = None
expected.code = defines.Codes.DELETED.number
expected.token = None
expected.payload = None
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_09(self):
print "TD_COAP_CORE_09"
path = "/separate"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = None
expected.token = None
expected.payload = None
expected2 = Response()
expected2.type = defines.Types["CON"]
expected2._mid = self.server_mid
expected2.code = defines.Codes.CONTENT.number
expected2.token = None
expected2.payload = "Separate Resource"
self.current_mid += 1
self._test_plugtest([(req, expected), (None, expected2)])
def test_td_coap_core_10(self):
print "TD_COAP_CORE_10"
path = "/test"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.token = "ciao"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Test Resource"
expected.token = "ciao"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_12(self):
print "TD_COAP_CORE_12"
path = "/seg1/seg2/seg3"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.payload = "Test Resource"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_core_13(self):
print "TD_COAP_CORE_13"
path = "/query?first=1&second=2&third=3"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Test Resource"
self.current_mid += 1
self._test_with_client([(req, expected)])
def test_td_coap_obs_01(self):
print "TD_COAP_OBS_01"
path = "/obs"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.observe = 0
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Observable Resource"
expected.observe = 1
expected2 = Response()
expected2.type = defines.Types["CON"]
expected2._mid = self.server_mid
expected2.code = defines.Codes.CONTENT.number
expected2.token = None
expected2.payload = "Observable Resource"
expected2.observe = 1
self.current_mid += 1
self.server_mid += 1
self._test_plugtest([(req, expected), (None, expected2)])
def test_td_coap_obs_03(self):
print "TD_COAP_OBS_03"
path = "/obs"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.observe = 0
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = "Observable Resource"
expected.observe = 1
self.current_mid += 1
expected2 = Response()
expected2.type = defines.Types["CON"]
expected2._mid = self.server_mid
expected2.code = defines.Codes.CONTENT.number
expected2.token = None
expected2.payload = "Observable Resource"
expected2.observe = 1
rst = Response()
rst.type = defines.Types["RST"]
rst._mid = self.server_mid
rst.code = defines.Codes.EMPTY.number
rst.destination = self.server_address
rst.token = None
rst.payload = None
self.current_mid += 1
self.server_mid += 1
self._test_plugtest([(req, expected), (None, expected2), (rst, None)])
def test_td_coap_block_01(self):
print "TD_COAP_BLOCK_01"
path = "/large"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.block2 = (0, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (0, 1, 1024)
exchange1 = (req, expected)
self.current_mid += 1
self.server_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.block2 = (1, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (1, 0, 1024)
exchange2 = (req, expected)
self.current_mid += 1
self.server_mid += 1
self._test_plugtest([exchange1, exchange2])
def test_td_coap_block_01_client(self):
print "TD_COAP_BLOCK_01"
path = "/large"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = None
req.destination = self.server_address
req.block2 = (0, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = """"Me sabbee plenty"—grunted Queequeg, puffing away at his pipe and sitting up in bed.
"You gettee in," he added, motioning to me with his tomahawk, and throwing the clothes to one side. He really did this
in not only a civil but a really kind and charitable way. I stood looking at him a moment. For all his tattooings
he was on the whole a clean, comely looking cannibal. What's all this fuss I have been making about, thought I to
myself—the man's a human being just as I am: he has just as much reason to fear me, as I have to be afraid of him.
Better sleep with a sober cannibal than a drunken Christian.
"Landlord," said I, "tell him to stash his tomahawk there, or pipe, or whatever you call it; tell him to stop smoking,
in short, and I will turn in with him. But I don't fancy having a man smoking in bed with me. It's dangerous. Besides,
I ain't insured."
This being told to Queequeg, he at once complied, and again politely motioned me to get into bed—rolling over to one
side as much as to say—"I won't touch a leg of ye."
"Good night, landlord," said I, "you may go."
I turned in, and never slept better in my life.
Upon waking next morning about daylight, I found Queequeg's arm thrown over me in the most loving and affectionate
manner. You had almost thought I had been his wife. The counterpane was of patchwork, full of odd little
parti-coloured squares and triangles; and this arm of his tattooed all over with an interminable Cretan labyrinth
of a figure, no two parts of which were of one precise shade—owing I suppose to his keeping his arm at sea
unmethodically in sun and shade, his shirt sleeves irregularly rolled up at various times—this same arm of his,
I say, looked for all the world like a strip of that same patchwork quilt. Indeed, partly lying on it as the arm did
when I first awoke, I could hardly tell it from the quilt, they so blended their hues together; and it was only by
the sense of weight and pressure that I could tell that Queequeg was hugging"""
expected.block2 = (1, 0, 1024)
exchange1 = (req, expected)
self.current_mid += 1
self.server_mid += 1
self._test_with_client([exchange1])
def test_td_coap_block_02_client(self):
print "TD_COAP_BLOCK_02"
path = "/large"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = None
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = None
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = """"Me sabbee plenty"—grunted Queequeg, puffing away at his pipe and sitting up in bed.
"You gettee in," he added, motioning to me with his tomahawk, and throwing the clothes to one side. He really did this
in not only a civil but a really kind and charitable way. I stood looking at him a moment. For all his tattooings
he was on the whole a clean, comely looking cannibal. What's all this fuss I have been making about, thought I to
myself—the man's a human being just as I am: he has just as much reason to fear me, as I have to be afraid of him.
Better sleep with a sober cannibal than a drunken Christian.
"Landlord," said I, "tell him to stash his tomahawk there, or pipe, or whatever you call it; tell him to stop smoking,
in short, and I will turn in with him. But I don't fancy having a man smoking in bed with me. It's dangerous. Besides,
I ain't insured."
This being told to Queequeg, he at once complied, and again politely motioned me to get into bed—rolling over to one
side as much as to say—"I won't touch a leg of ye."
"Good night, landlord," said I, "you may go."
I turned in, and never slept better in my life.
Upon waking next morning about daylight, I found Queequeg's arm thrown over me in the most loving and affectionate
manner. You had almost thought I had been his wife. The counterpane was of patchwork, full of odd little
parti-coloured squares and triangles; and this arm of his tattooed all over with an interminable Cretan labyrinth
of a figure, no two parts of which were of one precise shade—owing I suppose to his keeping his arm at sea
unmethodically in sun and shade, his shirt sleeves irregularly rolled up at various times—this same arm of his,
I say, looked for all the world like a strip of that same patchwork quilt. Indeed, partly lying on it as the arm did
when I first awoke, I could hardly tell it from the quilt, they so blended their hues together; and it was only by
the sense of weight and pressure that I could tell that Queequeg was hugging"""
expected.block2 = (1, 0, 1024)
exchange1 = (req, expected)
self.current_mid += 1
self.server_mid += 1
self._test_with_client([exchange1])
def test_td_coap_block_02(self):
print "TD_COAP_BLOCK_02"
path = "/large"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (0, 1, 1024)
exchange1 = (req, expected)
self.current_mid += 1
self.server_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.block2 = (1, 0, 1024)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = None
expected.block2 = (1, 0, 1024)
exchange2 = (req, expected)
self.current_mid += 1
self.server_mid += 1
self._test_plugtest([exchange1, exchange2])
def test_td_coap_block_03(self):
print "TD_COAP_BLOCK_03"
path = "/large-update"
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = """"Me sabbee plenty"—grunted Queequeg, puffing away at his pipe """
req.block1 = (0, 1, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTINUE.number
expected.token = None
expected.payload = None
expected.block1 = (0, 1, 64)
exchange1 = (req, expected)
self.current_mid += 1
self.server_mid += 1
req = Request()
req.code = defines.Codes.PUT.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = """and sitting up in bed. "You gettee in," he added, motioning"""
req.block1 = (1, 0, 64)
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
exchange2 = (req, expected)
self.current_mid += 1
self.server_mid += 1
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.payload = """"Me sabbee plenty"—grunted Queequeg, puffing away at his pipe and sitting up in bed. "You gettee in," he added, motioning"""
exchange3 = (req, expected)
self.current_mid += 1
self._test_plugtest([exchange1, exchange2, exchange3])
def test_duplicate(self):
print "TEST_DUPLICATE"
path = "/test"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
self.current_mid += 1
self._test_plugtest([(req, expected), (req, expected)])
def test_duplicate_not_completed(self):
print "TEST_DUPLICATE_NOT_COMPLETED"
path = "/long"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = None
expected.token = None
expected2 = Response()
expected2.type = defines.Types["CON"]
expected2._mid = None
expected2.code = defines.Codes.CONTENT.number
expected2.token = None
self.current_mid += 1
self._test_plugtest([(req, None), (req, expected), (None, expected2)])
def test_no_response(self):
print "TEST_NO_RESPONSE"
path = "/long"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = None
expected.token = None
expected2 = Response()
expected2.type = defines.Types["CON"]
expected2._mid = None
expected2.code = defines.Codes.CONTENT.number
expected2.token = None
self.current_mid += 1
self._test_plugtest([(req, expected), (None, expected2), (None, expected2), (None, expected2)])
def test_edit_resource(self):
print "TEST_EDIT_RESOURCE"
path = "/obs"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.payload = "<value>test</value>"
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.payload = None
expected.location_path = "/obs"
self.current_mid += 1
self._test_with_client([(req, expected)])
if __name__ == '__main__':
unittest.main()
|
server.py
|
import time
import json
import logging
from threading import Thread
from datetime import datetime, timedelta
from functools import wraps
import serial
import schedule
import forecastio
import mandrill
from flask import Flask, request, jsonify
from influxdb.influxdb08 import InfluxDBClient
from twilio.rest import TwilioRestClient
try:
from flask.ext.cors import CORS # The typical way to import flask-cors
except ImportError:
# Path hack allows examples to be run without installation.
import os
parentDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentDir)
from flask.ext.cors import CORS
FILE_NAME = "config.json"
config = {}
current_forecast = {}
client = None
influxdb = None
ser = None
app = Flask(__name__, static_url_path='/static')
app.debug = True
cors = CORS(app)
class Throttle(object):
"""
Decorator that prevents a function from being called more than once every
time period.
To create a function that cannot be called more than once a minute:
@Throttle(minutes=1)
def my_fun():
pass
"""
def __init__(self, seconds=0, minutes=0, hours=0):
self.throttle_period = timedelta(
seconds=seconds, minutes=minutes, hours=hours
)
self.time_of_last_call = datetime.min
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
now = datetime.now()
time_since_last_call = now - self.time_of_last_call
if time_since_last_call > self.throttle_period:
self.time_of_last_call = now
return fn(*args, **kwargs)
return wrapper
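# Illustrative sketch only (not part of the original server code): calls that arrive
# inside the throttle period are dropped, and the wrapper returns None.
#
#     @Throttle(seconds=2)
#     def notify():
#         return "sent"
#
#     notify()   # -> "sent"
#     notify()   # -> None, suppressed because less than 2 seconds have passed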
def compare(op, val1, val2):
if val1 is None or val2 is None:
return False
elif op == '>':
return val1 > val2
elif op == '>=':
return val1 >= val2
elif op == '<':
return val1 < val2
elif op == '<=':
return val1 <= val2
elif op == '=':
return val1 == val2
return False
def op_name(op):
if op == '>':
return 'higher'
elif op == '>=':
return 'higher or equal'
elif op == '<':
return 'lower'
elif op == '<=':
return 'lower or equal'
elif op == '=':
return 'equal'
return None
@Throttle(minutes=1)
def do_action(action, sensor_type, op, sensor_value, warning_value):
logging.debug(
'Executing action {action} sensor value {sensor} and warning value {warning}'.format(action=action,
sensor=sensor_value,
warning=warning_value))
message = \
'Warning: {type} value {sensor} is {op} than configured warning level {warning}'.format(type=sensor_type,
sensor=sensor_value,
op=op_name(op),
warning=warning_value)
if action == 'mail':
send_mail('aWarehouse warning', message)
elif action == 'sms':
send_sms(message)
def check_alerts(conf, sensors):
logging.debug('Checking alerts')
for w in conf['warnings']:
action = w['action']
op = w['op']
warning_value = w['value']
sensor_type = w['type']
sensor_value = get_sensor_value(sensors, sensor_type)
logging.debug('Checking alerts action {action} {sensor} {op} {warning}'.format(action=action,
sensor=sensor_value,
op=op, warning=warning_value))
if warning_value is None:
continue
if compare(op, sensor_value, warning_value):
do_action(action, sensor_type, op, sensor_value, warning_value)
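# For illustration only: a plausible "warnings" entry in config.json. The keys match
# what the loop in check_alerts() reads (type/op/value/action); the values themselves
# are hypothetical, not taken from a real deployment.
#
#     "warnings": [
#         {"type": "temperature", "op": ">=", "value": 30, "action": "mail"}
#     ]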
def get_sensor_value(sensors, sensor_type):
names = {
'temperature': ['sensors', 'temp1'],
'humidity': ['sensors', 'humidity'],
'brightness': ['sensors', 'light_sensor'],
'heat': ['sensors', 'heat_index'],
'sound': ['sensors_fast', 'sound']
}
for sensor in sensors:
if sensor['name'] == names[sensor_type][0]:
return sensor['points'][0][sensor['columns'].index(names[sensor_type][1])]
return None
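# Hypothetical shape of the series list that get_sensor_value() indexes into
# (InfluxDB 0.8-style "name"/"columns"/"points" records); the values are made up:
#
#     sensors = [{"name": "sensors",
#                 "columns": ["temp1", "humidity", "light_sensor", "heat_index"],
#                 "points": [[21.5, 40.2, 512, 22.1]]}]
#     get_sensor_value(sensors, 'temperature')  # -> 21.5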
def load_file():
with open(FILE_NAME, "r") as data_file:
global config
config = json.load(data_file)
global client
client = TwilioRestClient(
config['twilio']['sid'], config['twilio']['token'])
global influxdb
influxdb = InfluxDBClient(config['db']['host'],
config['db']['port'], config['db']['username'],
config['db']['password'], config['db']['name'])
global ser
ser = serial.Serial(config['arduino']['com_port'], config['arduino']['baudrate'])
data_file.close()
def send_sms(content):
client.messages.create(
to='+' + str(config['twilio']['to']),
from_=config['twilio']['from'],
body=content,
)
def send_mail(subj, msg):
to = config['mandrill']['to']
key = config['mandrill']['token']
from_email = config['mandrill']['from']
kwargs = {'api_key': key,
'reply_to': from_email,
'recipient': 'Recipient',
'from_email': from_email
}
post_mail(to=to, msg=msg, subj=subj, **kwargs)
def post_mail(to, subj, msg, **kwargs):
""" Sends the message by posting to Mandrill API
@param to: the recipient for the message
@type to: str
@param subj: the subject for the email
@type subj: str
@param msg: the body of the message, in plain text
@type msg: str
@param kwargs: other settings, compliant with Mandrill API
@type kwargs: dict
@see: https://mandrillapp.com/api/docs/
"""
msg = {
'from_email': kwargs.get('from_email'),
'from_name': 'aWarehouse',
'html': '<h3>Automated Alert</h3><p>{msg}</p><h6>Sent via Mandrill API</h6>'.format(msg=msg),
'subject': subj,
'to': [
{'email': to,
'type': 'to'
}
]
}
mc = mandrill.Mandrill(kwargs.get('api_key'))
try:
res = mc.messages.send(msg, async=kwargs.get('async', False))
if res and not res[0].get('status') == 'sent':
logging.error('Could not send email to {to}; status: {status}, reason: {reason}'
                      .format(to=to, status=res[0].get('status', 'unknown'),
                              reason=res[0].get('reject_reason')))
exit(1)
except mandrill.Error as e:
# Mandrill errors are thrown as exceptions
logging.error('A mandrill error occurred: {} - {}'.format(e.__class__.__name__, e))
logging.info('Message sent to {to}'.format(to=to))
def get_sensors():
slow = get_sensors.counter == (
(
config['arduino']['read_sensors_timer'] / config['arduino']['read_sensors_fast_timer']) - 1)
if slow:
ser.write('r')
get_sensors.counter = 0
else:
ser.write('x')
get_sensors.counter += 1
json_info = ser.readline()
json_info = json_info.replace('\n', '')
json_info = json_info.replace('\r', '')
json_info = json_info.replace('\'', '\"')
m = json.loads(json_info)
if slow:
m.append(current_forecast)
try:
influxdb.write_points(m)
except:
logging.exception('Unexpected error InfluxDB')
check_alerts(config, m)
get_sensors.counter = 0
def get_meteo():
try:
forecast = forecastio.load_forecast(
config['forecast']['api'], config['forecast']['lat'],
config['forecast']['long'])
temp = forecast.currently().temperature
humi = forecast.currently().humidity * 100
except:
logging.exception('Unexpected error Forecast.io')
else:
global current_forecast
current_forecast = {
"points": [[temp, humi]],
"name": "forecastio",
"columns": ["temperature", "humidity"]
}
def run_schedule():
while 1:
schedule.run_pending()
time.sleep(1)
@app.route('/', methods=['GET'])
def index():
return app.send_static_file('index.html')
@app.route('/api/config', methods=['GET', 'POST'])
def get_api_config():
if request.method == 'GET':
return jsonify(config)
else:
data = request.json
with open(FILE_NAME, 'w') as outfile:
json.dump(data, outfile, indent=4)
load_file()
return "done"
@app.route('/config', methods=['GET'])
def configuration():
return app.send_static_file('configuration/index.html')
@app.route('/<path:path>')
def static_proxy(path):
# send_static_file will guess the correct MIME type
return app.send_static_file(path)
if __name__ == '__main__':
loglevel = logging.DEBUG
logging.basicConfig(format='%(asctime)-15s [%(levelname)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=loglevel)
logging.info('aWarehouse starting...')
load_file()
get_meteo() # init current_forecast
schedule.every(
config['arduino']['read_sensors_fast_timer']).seconds.do(get_sensors)
schedule.every(
config['arduino']['get_meteo_timer']).seconds.do(get_meteo)
t = Thread(target=run_schedule)
t.daemon = True
t.start()
app.run(debug=True, use_reloader=False, host='0.0.0.0', port=8080)
ser.close()
|
async_command.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tools for running command line processes compatibly with asyncio.
"""
import asyncio
import datetime
import logging
import pprint
import queue
import sys
import threading
import time
import traceback
import weakref
from functools import partial
from typing import Callable, AsyncIterator, Iterable, Union
__author__ = "Robert Harder"
__email__ = "rob@iharder.net"
__license__ = "Public Domain"
def main():
# An example
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(test())
loop.run_until_complete(
async_execute_command("cmd",
provide_stdin=AsyncReadConsole(),
handle_stdout=lambda x: print(f"{x.decode() if x else ''}", end="", flush=True),
handle_stderr=lambda x: print(f"{x.decode() if x else ''}", end="", file=sys.stderr,
flush=True)
)
)
async def test():
print("This will test the awaitiness.")
async def _heartbeat():
for _ in range(3):
print("♡", flush=True)
await asyncio.sleep(2)
asyncio.create_task(_heartbeat())
async with AsyncReadConsole() as arc:
resp = await arc.input("Say: ")
print(f"you said '{resp}'")
print("The prompt can be a function that updates each time it is displayed.")
async with AsyncReadConsole(
prompt=lambda: "{}: ".format(datetime.datetime.now()),
end=lambda x: f" ({len(x)})") \
as arc:
async for line in arc:
print(f"GOT: [{line}]", flush=True)
if line.startswith("EOF"):
break
class AsyncReadConsole:
"""An AsyncIterator that reads from the console."""
def __init__(self, prompt: Union[str, Callable] = None, end: Union[str, Callable] = None):
"""Creates a new AsyncReadConsole with optional default prompt.
The prompt can be a Callable function/lambda or a string or None.
If prompt is Callable, it will be called each time the prompt is
presented, making it possible to have "live" prompts. The prompt
can be a regular or async function.
The end parameter can be a callable function/lambda or a string or None.
If callable it can be either a coroutine or a regular function.
The line that is about to be sent is passed as an argument.
:param prompt: optional prompt
:param end: end character of a line, default is no end marker
"""
self.log = logging.getLogger(__name__ + "." + self.__class__.__name__)
self.main_loop: asyncio.BaseEventLoop = None
self.thread_loop: asyncio.BaseEventLoop = None
self.thread: threading.Thread = None
self.arc_stopping: bool = False
self.arc_stopping_evt: asyncio.Event = None
self.thread_stopped: asyncio.Event = None
self.self_started: bool = None
self.end: Union[str, Callable] = end # "\n" if end is None else end
self.prompt: Union[str, Callable] = prompt
self.prompt_queue: asyncio.Queue = None # on thread loop
self.input_queue: asyncio.Queue = None # on main loop
def __del__(self):
print(f"__del__, prompt_queue: {self.prompt_queue.qsize()}", flush=True)
while True:
try:
x = self.prompt_queue.get_nowait()
print(f"\t{x}", flush=True)
except asyncio.QueueEmpty:
break
print(f"__del__, input_queue: {self.input_queue.qsize()}", flush=True)
while True:
try:
x = self.input_queue.get_nowait()
print(f"\t{x}", flush=True)
except asyncio.QueueEmpty:
break
async def __aenter__(self):
# print("__aenter__", flush=True)
self.main_loop = asyncio.get_event_loop()
self.input_queue = asyncio.Queue()
_thread_ready_to_go = asyncio.Event()
self.thread_stopped = asyncio.Event()
self.arc_stopping_evt = asyncio.Event()
def _thread_run():
# print("_thread_run", flush=True)
self.thread_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.thread_loop)
async def _async_thread_run():
# print("_async_thread_run", flush=True)
try:
self.prompt_queue = asyncio.Queue()
self.main_loop.call_soon_threadsafe(_thread_ready_to_go.set) # Thread is up and running
while not self.arc_stopping:
# Do not proceed until someone actually wants a line of input
# because once the input() function is called, we're blocked there.
print(f"AWAITING prompt_queue.get()",flush=True)
prompt = await self.prompt_queue.get()
print(f"GOT PROMPT: '{prompt}'", flush=True)
await asyncio.sleep(0)
if self.arc_stopping:
print("STOPPING, SEND None TO INPUT_QUEUE", flush=True)
asyncio.run_coroutine_threadsafe(self.input_queue.put(None), self.main_loop)
break
line = None
try:
print("LISTENING FOR STDIN INPUT...",flush=True)
if prompt:
line = input(prompt)
else:
line = input()
except EOFError as ex:
print(f"EOFError {ex}", flush=True)
asyncio.run_coroutine_threadsafe(self.input_queue.put(ex), self.main_loop)
break
else:
print(f"ADD LINE TO INPUT QUEUE: {line}",flush=True)
asyncio.run_coroutine_threadsafe(self.input_queue.put(line), self.main_loop)
finally:
print("DONE WITH THIS ROUND OF INPUT")
# assert line is not None, "Did not expect line to be none"
if line is None:
print("DID NOT EXPECT THIS", flush=True)
break
print("LAST LINE WHILE LOOP", flush=True)
# await asyncio.sleep(0) # one last time to yield to event loop
self.thread_loop = None
except Exception as ex:
print("Error in _async_thread_run:", ex.__class__.__name__, ex, file=sys.stderr, flush=True)
traceback.print_tb(sys.exc_info()[2])
finally:
print("thread loop exiting")
self.thread_loop.run_until_complete(_async_thread_run())
print("_async_thread_run is complete")
self.main_loop.call_soon_threadsafe(self.thread_stopped.set)
print("_thread_run exiting")
if self.thread_loop is None:
self.thread = threading.Thread(target=_thread_run, name="Thread-console_input", daemon=True)
self.thread.start()
else:
raise Exception(f"{self.__class__.__name__} already has a support thread--was __aenter__ called twice?")
await _thread_ready_to_go.wait()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
print("__aexit__", flush=True)
if self.arc_stopping:
print("Already stopping", flush=True)
pass
else:
self.arc_stopping = True
self.arc_stopping_evt.set()
await self.input_queue.put(None)
if self.thread_loop is not None:
print("PUTTING ONE LAST NONE", flush=True)
asyncio.run_coroutine_threadsafe(self.prompt_queue.put(None), self.thread_loop)
return
def __aiter__(self) -> AsyncIterator[str]:
# print("__aiter__", flush=True)
return self
async def __anext__(self, prompt=None, end=None) -> str:
try:
print("__anext__", flush=True)
if self.arc_stopping:
raise StopAsyncIteration()
if self.main_loop is None: # Apparently __aenter__ was never called
_self_start_ready = asyncio.Event()
async def _self_start():
async with self as _:
self.self_started = True
_self_start_ready.set()
await self.arc_stopping_evt.wait()
asyncio.create_task(_self_start())
await _self_start_ready.wait()
# Resolve prompt
prompt = prompt or self.prompt
if asyncio.iscoroutinefunction(prompt):
prompt = await prompt()
elif callable(prompt):
prompt = prompt()
print(f"__anext__ is putting a prompt on the queue: '{prompt}' ...", flush=True)
asyncio.run_coroutine_threadsafe(self.prompt_queue.put(prompt), self.thread_loop)
print(f"__anext__ is awaiting the input queue...", flush=True)
line = await self.input_queue.get()
print(f"__anext__ got something from the input queue: '{line}'", flush=True)
if isinstance(line, Exception):
raise StopAsyncIteration(line) from line
if line is None:
print("LINE IS NONE, RAISING StopAsyncIteration", flush=True)
raise StopAsyncIteration()
else:
# Resolve ending
end = self.end if end is None else end
if asyncio.iscoroutinefunction(end):
end = await end(line)
elif callable(end):
end = end(line)
if end is not None:
line = f"{line}{end}"
return line
except StopAsyncIteration as sai:
if self.self_started:
await self.close()
raise sai
async def input(self, prompt=None, end=None):
line = None
try:
line = await self.__anext__(prompt=prompt, end=end)
except StopAsyncIteration:
line = None
finally:
return line
async def readline(self):
"""Reads a line of input. Same as input() but without a prompt."""
return await self.input()
async def close(self):
# print(self.__class__.__name__, "close() entrance", flush=True)
self.arc_stopping = True
self.arc_stopping_evt.set()
await self.input_queue.put(None)
if self.thread_loop:
asyncio.run_coroutine_threadsafe(self.prompt_queue.put(None), self.thread_loop)
print("WAITING FOR self.thread_stopped.wait()", flush=True)
await self.thread_stopped.wait()
print("ZZZ", self.__class__.__name__, "close() exit", flush=True)
async def async_execute_command(cmd, args: Iterable = (),
provide_stdin: AsyncIterator = None,
handle_stdout: Callable = None,
handle_stderr: Callable = None, daemon=True):
parent_loop = asyncio.get_event_loop()
parent_loop_tasks = weakref.WeakSet()
thread_done_evt = asyncio.Event()
output_callback_queue = asyncio.Queue()
async def _monitor_output_callback_queue():
while True:
try:
x = await output_callback_queue.get()
if x is None:
# We're all done -- shutdown
break
check = x.func if isinstance(x, partial) else x
if asyncio.iscoroutinefunction(check):
await x()
else:
x()
except Exception as ex:
print("Error in callback:", ex.__class__.__name__, ex, file=sys.stderr, flush=True)
traceback.print_tb(sys.exc_info()[2])
# print("DONE: _monitor_callback_queue", flush=True)
parent_loop_tasks.add(asyncio.create_task(_monitor_output_callback_queue()))
if sys.platform == 'win32':
proc_loop = asyncio.ProactorEventLoop()
else:
proc_loop = asyncio.new_event_loop() # Processes
asyncio.get_child_watcher() # Main loop
def _thread_run(_thread_loop: asyncio.BaseEventLoop):
# Running on thread that will host proc_loop
async def __run():
# Running within proc_loop
# asyncio.get_event_loop().set_debug(True)
try:
# print("Server is launching", cmd, *args, flush=True)
proc = await asyncio.create_subprocess_exec(
cmd, *args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
async def __process_output(_out: asyncio.StreamReader, _output_callback: Callable):
# Runs within proc_loop
try:
while True:
buf = b''
line = None
while line is None:
try:
# Handle an incomplete line output such as when
# a command prompt leaves the input cursor at the end.
c = await asyncio.wait_for(_out.read(1), 0.1)
except asyncio.futures.TimeoutError:
if buf:
line = buf
# except Exception as ex:
# print("Exception", type(ex), ex, file=sys.stderr, flush=True)
# pass
else:
buf += c
if c == b'\n':
line = buf
# Handle EOF
elif c == b'':
line = buf
if line:
# First send whatever line we have left
part = partial(_output_callback, line)
asyncio.run_coroutine_threadsafe(output_callback_queue.put(part),
parent_loop)
# Then send a marker saying we're done
part = partial(_output_callback, None)
asyncio.run_coroutine_threadsafe(output_callback_queue.put(part), parent_loop)
return
if line:
part = partial(_output_callback, line)
asyncio.run_coroutine_threadsafe(output_callback_queue.put(part), parent_loop)
else:
break
except Exception as ex:
print("Error in __process_output:", ex.__class__.__name__, ex, file=sys.stderr, flush=True)
traceback.print_tb(sys.exc_info()[2])
async def __receive_input(_input_provider: AsyncIterator[str]):
# Runs in parent_loop
# asyncio.get_event_loop().set_debug(True)
async for __line in _input_provider:
proc.stdin.write(f"{__line}\n".encode())
proc.stdin.write_eof()
# input_done_evt.set()
tasks = []
if provide_stdin:
asyncio.run_coroutine_threadsafe(__receive_input(provide_stdin), parent_loop)
# parent_loop_tasks.add(parent_loop.create_task(input_done_evt.wait()))
if handle_stdout:
tasks.append(_thread_loop.create_task(__process_output(proc.stdout, handle_stdout)))
if handle_stderr:
tasks.append(_thread_loop.create_task(__process_output(proc.stderr, handle_stderr)))
# print("GATHERING...", flush=True)
await asyncio.gather(*tasks)
# print(f"GATHERED {pprint.pformat(tasks)}", flush=True)
except Exception as ex:
print(ex, file=sys.stderr, flush=True)
traceback.print_tb(sys.exc_info()[2])
asyncio.set_event_loop(_thread_loop)
_thread_loop.run_until_complete(__run())
parent_loop.call_soon_threadsafe(thread_done_evt.set)
# parent_loop.call_soon_threadsafe(input_done_evt.set)
print("Thread-proc run closed.", flush=True)
# Launch the process in another thread, and wait for it to complete
threading.Thread(target=partial(_thread_run, proc_loop), name="Thread-proc", daemon=daemon).start()
await thread_done_evt.wait() # Waiting for proc_loop thread to finish
await output_callback_queue.put(None) # Signal that no more callbacks will be called
await asyncio.gather(*parent_loop_tasks) # Wait for all callbacks to finish
await asyncio.sleep(1)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
test_log_monitor.py
|
import multiprocessing as mp
import random
from multiprocessing import Process, Queue
from queue import Empty
from time import sleep
import pytest
import mnqueues as mnq
from mnqueues.log_monitor import LOGMonitor
def test_gcp_monitor():
g = LOGMonitor("test")
return True
def consumer(q: mnq.MNQueue):
for _ in range(10000):
try:
print(q.get(block=True, timeout=1))
sleep(0.01)
except Empty:
print("Empty queue, quiting")
break
print("consumer: get done, giving grace")
sleep(65)
print("consumer completed")
def producer(q: mnq.MNQueue):
for i in range(10000):
q.put(f"testing {i}..")
print("producer: put done, giving grace")
sleep(65)
print("producer completed")
def test_mp_basic():
q = mnq.MNQueue(monitor=LOGMonitor("test"))
p = Process(target=producer, args=(q,))
c = Process(target=consumer, args=(q,))
p.start()
c.start()
p.join()
c.join()
def test_mp_2():
q = mnq.MNQueue(monitor=LOGMonitor("test"))
p = Process(target=producer, args=(q,))
c1 = Process(target=consumer, args=(q,))
c2 = Process(target=consumer, args=(q,))
p.start()
c1.start()
c2.start()
p.join()
c1.join()
c2.join()
|
pico_project.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import argparse
import os
import shutil
from pathlib import Path
import sys
import subprocess
from time import sleep
import platform
import shlex
import csv
import tkinter as tk
from tkinter import messagebox as mb
from tkinter import filedialog as fd
from tkinter import simpledialog as sd
from tkinter import ttk
CMAKELIST_FILENAME='CMakeLists.txt'
COMPILER_NAME='arm-none-eabi-gcc'
VSCODE_LAUNCH_FILENAME = 'launch.json'
VSCODE_C_PROPERTIES_FILENAME = 'c_cpp_properties.json'
VSCODE_SETTINGS_FILENAME ='settings.json'
VSCODE_FOLDER='.vscode'
CONFIG_UNSET="Not set"
# Standard libraries for all builds
# Add any more to the string below, space separated
STANDARD_LIBRARIES = 'pico_stdlib'
# Indexed on feature name; the tuple contains the GUI text, the C file, the H file and the CMake library name for the feature
GUI_TEXT = 0
C_FILE = 1
H_FILE = 2
LIB_NAME = 3
features_list = {
'spi' : ("SPI", "spi.c", "hardware/spi.h", "hardware_spi"),
'i2c' : ("I2C interface", "i2c.c", "hardware/i2c.h", "hardware_i2c"),
'dma' : ("DMA support", "dma.c", "hardware/dma.h", "hardware_dma"),
'pio' : ("PIO interface", "pio.c", "hardware/pio.h", "hardware_pio"),
'interp' : ("HW interpolation", "interp.c", "hardware/interp.h", "hardware_interp"),
'timer' : ("HW timer", "timer.c", "hardware/timer.h", "hardware_timer"),
'watch' : ("HW watchdog", "watch.c", "hardware/watchdog.h", "hardware_watchdog"),
'clocks' : ("HW clocks", "clocks.c", "hardware/clocks.h", "hardware_clocks"),
}
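# For example (illustrative only), the index constants above address the feature tuples:
#     features_list['spi'][GUI_TEXT]   # -> "SPI"
#     features_list['spi'][H_FILE]     # -> "hardware/spi.h"
#     features_list['spi'][LIB_NAME]   # -> "hardware_spi"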
stdlib_examples_list = {
'uart': ("UART", "uart.c", "hardware/uart.h", "hardware_uart"),
'gpio' : ("GPIO interface", "gpio.c", "hardware/gpio.h", "hardware_gpio"),
'div' : ("Low level HW Divider", "divider.c", "hardware/divider.h", "hardware_divider")
}
DEFINES = 0
INITIALISERS = 1
# Could add an extra item that shows how to use some of the available functions for the feature
#EXAMPLE = 2
# This also contains example code for the standard library (see stdlib_examples_list)
code_fragments_per_feature = {
'uart' : [
("// UART defines",
"// By default the stdout UART is `uart0`, so we will use the second one",
"#define UART_ID uart1",
"#define BAUD_RATE 9600", "",
"// Use pins 4 and 5 for UART1",
"// Pins can be changed, see the GPIO function select table in the datasheet for information on GPIO assignments",
"#define UART_TX_PIN 4",
"#define UART_RX_PIN 5" ),
( "// Set up our UART",
"uart_init(UART_ID, BAUD_RATE);",
"// Set the TX and RX pins by using the function select on the GPIO",
"// Set datasheet for more information on function select",
"gpio_set_function(UART_TX_PIN, GPIO_FUNC_UART);",
"gpio_set_function(UART_RX_PIN, GPIO_FUNC_UART);", "" )
],
'spi' : [
( "// SPI Defines",
"// We are going to use SPI 0, and allocate it to the following GPIO pins",
"// Pins can be changed, see the GPIO function select table in the datasheet for information on GPIO assignments",
"#define SPI_PORT spi0",
"#define PIN_MISO 16",
"#define PIN_CS 17",
"#define PIN_SCK 18",
"#define PIN_MOSI 19" ),
( "// SPI initialisation. This example will use SPI at 1MHz.",
"spi_init(SPI_PORT, 1000*1000);",
"gpio_set_function(PIN_MISO, GPIO_FUNC_SPI);",
"gpio_set_function(PIN_CS, GPIO_FUNC_SIO);",
"gpio_set_function(PIN_SCK, GPIO_FUNC_SPI);",
"gpio_set_function(PIN_MOSI, GPIO_FUNC_SPI);", "",
"// Chip select is active-low, so we'll initialise it to a driven-high state",
"gpio_set_dir(PIN_CS, GPIO_OUT);",
"gpio_put(PIN_CS, 1);", "")
],
'i2c' : [
(
"// I2C defines",
"// This example will use I2C0 on GPIO8 (SDA) and GPIO9 (SCL) running at 400KHz.",
"// Pins can be changed, see the GPIO function select table in the datasheet for information on GPIO assignments",
"#define I2C_PORT i2c0",
"#define I2C_SDA 8",
"#define I2C_SCL 9",
),
(
"// I2C Initialisation. Using it at 400Khz.",
"i2c_init(I2C_PORT, 400*1000);","",
"gpio_set_function(I2C_SDA, GPIO_FUNC_I2C);",
"gpio_set_function(I2C_SCL, GPIO_FUNC_I2C);",
"gpio_pull_up(I2C_SDA);",
"gpio_pull_up(I2C_SCL);"
)
],
"gpio" : [
(
"// GPIO defines",
"// Example uses GPIO 2",
"#define GPIO 2"
),
(
"// GPIO initialisation.",
"// We will make this GPIO an input, and pull it up by default",
"gpio_init(GPIO);",
"gpio_set_dir(GPIO, GPIO_IN);",
"gpio_pull_up(GPIO);","",
)
],
"interp" :[
(),
(
"// Interpolator example code",
"interp_config cfg = interp_default_config();",
"// Now use the various interpolator library functions for your use case",
"// e.g. interp_config_clamp(&cfg, true);",
"// interp_config_shift(&cfg, 2);",
"// Then set the config ",
"interp_set_config(interp0, 0, &cfg);",
)
],
"timer" : [
(
"int64_t alarm_callback(alarm_id_t id, void *user_data) {",
" // Put your timeout handler code in here",
" return 0;",
"}"
),
(
"// Timer example code - This example fires off the callback after 2000ms",
"add_alarm_in_ms(2000, alarm_callback, NULL, false);"
)
],
"watchdog":[ (),
(
"// Watchdog example code",
"if (watchdog_caused_reboot()) {",
" // Whatever action you may take if a watchdog caused a reboot",
"}","",
"// Enable the watchdog, requiring the watchdog to be updated every 100ms or the chip will reboot",
"// second arg is pause on debug which means the watchdog will pause when stepping through code",
"watchdog_enable(100, 1);","",
"// You need to call this function at least more often than the 100ms in the enable call to prevent a reboot"
"watchdog_update();",
)
],
"div" : [ (),
(
"// Example of using the HW divider. The pico_divider library provides a more user friendly set of APIs ",
"// over the divider (and support for 64 bit divides), and of course by default regular C language integer",
"// divisions are redirected thru that library, meaning you can just use C level `/` and `%` operators and",
"// gain the benefits of the fast hardware divider.",
"int32_t dividend = 123456;",
"int32_t divisor = -321;",
"// This is the recommended signed fast divider for general use.",
"divmod_result_t result = hw_divider_divmod_s32(dividend, divisor);",
"printf(\"%d/%d = %d remainder %d\\n\", dividend, divisor, to_quotient_s32(result), to_remainder_s32(result));",
"// This is the recommended unsigned fast divider for general use.",
"int32_t udividend = 123456;",
"int32_t udivisor = 321;",
"divmod_result_t uresult = hw_divider_divmod_u32(udividend, udivisor);",
"printf(\"%d/%d = %d remainder %d\\n\", udividend, udivisor, to_quotient_u32(uresult), to_remainder_u32(uresult));"
)
]
}
configuration_dictionary = list(dict())
isMac = False
isWindows = False
class Parameters():
def __init__(self, sdkPath, projectRoot, projectName, gui, overwrite, build, features, projects, configs, runFromRAM, examples, uart, usb):
self.sdkPath = sdkPath
self.projectRoot = projectRoot
self.projectName = projectName
self.wantGUI = gui
self.wantOverwrite = overwrite
self.wantBuild = build
self.features = features
self.projects = projects
self.configs = configs
self.wantRunFromRAM = runFromRAM
self.wantExamples = examples
self.wantUART = uart
self.wantUSB = usb
def GetBackground():
return 'white'
def GetButtonBackground():
return 'white'
def GetTextColour():
return 'black'
def GetButtonTextColour():
return '#c51a4a'
def RunGUI(sdkpath, args):
root = tk.Tk()
style = ttk.Style(root)
style.theme_use('default')
ttk.Style().configure("TButton", padding=6, relief="groove", border=2, foreground=GetButtonTextColour(), background=GetButtonBackground())
ttk.Style().configure("TLabel", foreground=GetTextColour(), background=GetBackground() )
ttk.Style().configure("TCheckbutton", foreground=GetTextColour(), background=GetBackground() )
ttk.Style().configure("TRadiobutton", foreground=GetTextColour(), background=GetBackground() )
ttk.Style().configure("TLabelframe", foreground=GetTextColour(), background=GetBackground() )
ttk.Style().configure("TLabelframe.Label", foreground=GetTextColour(), background=GetBackground() )
app = ProjectWindow(root, sdkpath, args)
app.configure(background=GetBackground())
root.mainloop()
sys.exit(0)
def RunWarning(message):
mb.showwarning('Raspberry Pi Pico Project Generator', message)
sys.exit(0)
class ChecklistBox(tk.Frame):
def __init__(self, parent, entries):
tk.Frame.__init__(self, parent)
self.vars = []
for c in entries:
# This var will be automatically updated by the checkbox
# The checkbox fills the var with the "onvalue" and "offvalue" as
# it is clicked on and off
var = tk.StringVar(value='') # Off by default for the moment
self.vars.append(var)
cb = ttk.Checkbutton(self, var=var, text=c,
onvalue=c, offvalue="",
width=20)
cb.pack(side="top", fill="x", anchor="w")
def getCheckedItems(self):
values = []
for var in self.vars:
value = var.get()
if value:
values.append(value)
return values
import threading
def thread_function(text, command, ok):
l = shlex.split(command)
proc = subprocess.Popen(l, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline, b''):
if not line:
if ok:
ok["state"] = tk.NORMAL
return
text.insert(tk.END, line.decode(errors='replace'))
text.see(tk.END)
# Function to run an OS command and display the output in a new modal window
class DisplayWindow(tk.Toplevel):
def __init__(self, parent, title):
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.init_window(title)
def init_window(self, title):
self.title(title)
frame = tk.Frame(self, borderwidth=5, relief=tk.RIDGE)
frame.pack(fill=tk.X, expand=True, side=tk.TOP)
scrollbar = tk.Scrollbar(frame)
self.text = tk.Text(frame, bg='gray14', fg='gray99')
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.text.pack(side=tk.LEFT, fill=tk.Y)
scrollbar.config(command=self.text.yview)
self.text.config(yscrollcommand=scrollbar.set)
frame1 = tk.Frame(self, borderwidth=1)
frame1.pack(fill=tk.X, expand=True, side=tk.BOTTOM)
self.OKButton = ttk.Button(frame1, text="OK", command=self.OK)
self.OKButton["state"] = tk.DISABLED
self.OKButton.pack()
# make dialog modal
self.transient(self.parent)
self.grab_set()
def OK(self):
self.destroy()
def RunCommandInWindow(parent, command):
w = DisplayWindow(parent, command)
x = threading.Thread(target=thread_function, args=(w.text, command, w.OKButton))
x.start()
parent.wait_window(w)
class EditBoolWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
sd.Dialog.__init__(self, parent, "Edit boolean configuration")
def body(self, master):
self.configure(background=GetBackground())
ttk.Label(self, text=self.config_item['name']).pack()
self.result = tk.StringVar()
self.result.set(self.current)
ttk.Radiobutton(master, text="True", variable=self.result, value="True").pack(anchor=tk.W)
ttk.Radiobutton(master, text="False", variable=self.result, value="False").pack(anchor=tk.W)
ttk.Radiobutton(master, text=CONFIG_UNSET, variable=self.result, value=CONFIG_UNSET).pack(anchor=tk.W)
def get(self):
return self.result.get()
class EditIntWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
sd.Dialog.__init__(self, parent, "Edit integer configuration")
def body(self, master):
self.configure(background=GetBackground())
str = self.config_item['name'] + " Max = " + self.config_item['max'] + " Min = " + self.config_item['min']
ttk.Label(self, text=str).pack()
self.input = tk.Entry(self)
self.input.pack(pady=4)
self.input.insert(0, self.current)
ttk.Button(self, text=CONFIG_UNSET, command=self.unset).pack(pady=5)
def validate(self):
self.result = self.input.get()
# Check for numeric entry
return True
def unset(self):
self.result = CONFIG_UNSET
self.destroy()
def get(self):
return self.result
class EditEnumWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
sd.Dialog.__init__(self, parent, "Edit Enumeration configuration")
def body(self, master):
#self.configure(background=GetBackground())
values = self.config_item['enumvalues'].split('|')
values.insert(0,'Not set')
self.input = ttk.Combobox(self, values=values, state='readonly')
self.input.set(self.current)
self.input.pack(pady=12)
def validate(self):
self.result = self.input.get()
return True
def get(self):
return self.result
class ConfigurationWindow(tk.Toplevel):
def __init__(self, parent, initial_config):
tk.Toplevel.__init__(self, parent)
self.master = parent
self.results = initial_config
self.init_window(self)
def init_window(self, args):
self.configure(background=GetBackground())
self.title("Advanced Configuration")
ttk.Label(self, text="Select the advanced options you wish to enable or change. Note that you really should understand the implications of changing these items before using them!").grid(row=0, column=0, columnspan=5)
ttk.Label(self, text="Name").grid(row=1, column=0, sticky=tk.W)
ttk.Label(self, text="Type").grid(row=1, column=1, sticky=tk.W)
ttk.Label(self, text="Min").grid(row=1, column=2, sticky=tk.W)
ttk.Label(self, text="Max").grid(row=1, column=3, sticky=tk.W)
ttk.Label(self, text="Default").grid(row=1, column=4, sticky=tk.W)
ttk.Label(self, text="User").grid(row=1, column=5, sticky=tk.W)
okButton = ttk.Button(self, text="OK", command=self.ok)
cancelButton = ttk.Button(self, text="Cancel", command=self.cancel)
self.namelist = tk.Listbox(self, selectmode=tk.SINGLE)
self.typelist = tk.Listbox(self, selectmode=tk.SINGLE)
self.minlist = tk.Listbox(self, selectmode=tk.SINGLE)
self.maxlist = tk.Listbox(self, selectmode=tk.SINGLE)
self.defaultlist = tk.Listbox(self, selectmode=tk.SINGLE)
self.valuelist = tk.Listbox(self, selectmode=tk.SINGLE)
self.descriptionText = tk.Text(self, state=tk.DISABLED, height=2)
## Make a list of our list boxes to make it all easier to handle
self.listlist = [self.namelist, self.typelist, self.minlist, self.maxlist, self.defaultlist, self.valuelist]
scroll = tk.Scrollbar(self, orient=tk.VERTICAL, command=self.yview)
for box in self.listlist:
box.config(width=0)
box.config(yscrollcommand=scroll.set)
box.bind("<MouseWheel>", self.mousewheel)
box.bind("<Button-4>", self.mousewheel)
box.bind("<Button-5>", self.mousewheel)
box.bind("<<ListboxSelect>>", self.changeSelection)
box.bind("<Double-Button>", self.doubleClick)
box.config(exportselection=False)
box.bind("<Down>", self.OnEntryUpDown)
box.bind("<Up>", self.OnEntryUpDown)
scroll.grid(column=7, sticky=tk.N + tk.S)
i = 0
for box in self.listlist:
box.grid(row=2, column=i, padx=0, sticky=tk.W + tk.E)
i+=1
self.descriptionText.grid(row = 3, column=0, columnspan=4, sticky=tk.W + tk.E)
cancelButton.grid(column=4, row = 3, sticky=tk.E, padx=5)
okButton.grid(column=5, row = 3, padx=5)
# populate the list box with our config options
for conf in configuration_dictionary:
self.namelist.insert(tk.END, conf['name'])
s = conf['type']
if s == "":
s = "int"
self.typelist.insert(tk.END, s)
self.maxlist.insert(tk.END, conf['max'])
self.minlist.insert(tk.END, conf['min'])
self.defaultlist.insert(tk.END, conf['default'])
# see if this config has a setting, our results member has this predefined from init
val = self.results.get(conf['name'], CONFIG_UNSET)
self.valuelist.insert(tk.END, val)
if val != CONFIG_UNSET:
self.valuelist.itemconfig(self.valuelist.size() - 1, {'bg':'green'})
def yview(self, *args):
for box in self.listlist:
box.yview(*args)
def mousewheel(self, event):
if (event.num == 4): # Linux encodes wheel as 'buttons' 4 and 5
delta = -1
elif (event.num == 5):
delta = 1
else: # Windows & OSX
delta = event.delta
for box in self.listlist:
box.yview("scroll", delta, "units")
return "break"
def changeSelection(self, evt):
box = evt.widget
sellist = box.curselection()
if sellist:
index = int(sellist[0])
config = self.namelist.get(index)
# Now find the description for that config in the dictionary
for conf in configuration_dictionary:
if conf['name'] == config:
self.descriptionText.config(state=tk.NORMAL)
self.descriptionText.delete(1.0,tk.END)
str = config + "\n" + conf['description']
self.descriptionText.insert(1.0, str)
self.descriptionText.config(state=tk.DISABLED)
break
# Set all the other list boxes to the same index
for b in self.listlist:
if b != box:
b.selection_clear(0, tk.END)
b.selection_set(index)
def OnEntryUpDown(self, event):
box = event.widget
selection = box.curselection()
if selection:
index = int(selection[0])
if event.keysym == 'Up':
index -= 1
elif event.keysym == 'Down':
index += 1
if 0 <= index < box.size():
for b in self.listlist:
b.selection_clear(0, tk.END)
b.selection_set(index)
b.see(index)
def doubleClick(self, evt):
box = evt.widget
index = int(box.curselection()[0])
config = self.namelist.get(index)
# Get the associated dict entry from our list of configs
for conf in configuration_dictionary:
if conf['name'] == config:
if (conf['type'] == 'bool'):
result = EditBoolWindow(self, conf, self.valuelist.get(index)).get()
elif (conf['type'] == 'int' or conf['type'] == ""): # "" defaults to int
result = EditIntWindow(self, conf, self.valuelist.get(index)).get()
elif conf['type'] == 'enum':
result = EditEnumWindow(self, conf, self.valuelist.get(index)).get()
# Update the valuelist with our new item
self.valuelist.delete(index)
self.valuelist.insert(index, result)
if result != CONFIG_UNSET:
self.valuelist.itemconfig(index, {'bg':'green'})
break
def ok(self):
# Get the selections, and create a list of them
for i, val in enumerate(self.valuelist.get(0, tk.END)):
if val != CONFIG_UNSET:
self.results[self.namelist.get(i)] = val
self.destroy()
def cancel(self):
self.destroy()
def get(self):
return self.results
# Our main window
class ProjectWindow(tk.Frame):
def __init__(self, parent, sdkpath, args):
tk.Frame.__init__(self, parent)
self.master = parent
self.sdkpath = sdkpath
self.init_window(args)
self.configs = dict()
def init_window(self, args):
self.master.title("Raspberry Pi Pico Project Generator")
self.master.configure(bg=GetBackground())
mainFrame = tk.Frame(self, bg=GetBackground()).grid(row=0, column=0, columnspan=6, rowspan=12)
# Need to keep a reference to the image or it will not appear.
self.logo = tk.PhotoImage(file=self._get_filepath("logo_alpha.gif"))
logowidget = ttk.Label(mainFrame, image=self.logo, borderwidth=0, relief="solid").grid(row=0,column=0, columnspan=5, pady=10)
namelbl = ttk.Label(mainFrame, text='Project Name :').grid(row=2, column=0, sticky=tk.E)
self.projectName = tk.StringVar()
if args.name != None:
self.projectName.set(args.name)
else:
self.projectName.set('ProjectName')
nameEntry = ttk.Entry(mainFrame, textvariable=self.projectName).grid(row=2, column=1, sticky=tk.W+tk.E, padx=5)
locationlbl = ttk.Label(mainFrame, text='Location :').grid(row=3, column=0, sticky=tk.E)
self.locationName = tk.StringVar()
self.locationName.set(os.getcwd())
locationEntry = ttk.Entry(mainFrame, textvariable=self.locationName).grid(row=3, column=1, columnspan=3, sticky=tk.W+tk.E, padx=5)
locationBrowse = ttk.Button(mainFrame, text='Browse', command=self.browse).grid(row=3, column=4)
# Features section
featuresframe = ttk.LabelFrame(mainFrame, text="Library Options", relief=tk.RIDGE, borderwidth=2)
featuresframe.grid(row=4, column=0, columnspan=5, rowspan=5, ipadx=5, padx=5, sticky=tk.E+tk.W)
# Add features to the list
v = []
for i in features_list:
v.append(features_list[i][GUI_TEXT])
s = (len(v)//3) + 1
self.featuresEntry0 = ChecklistBox(featuresframe, v[:s])
self.featuresEntry0.grid(row=5, column=1, padx=4)
self.featuresEntry1 = ChecklistBox(featuresframe, v[s:s+s])
self.featuresEntry1.grid(row=5, column=2, padx=4)
self.featuresEntry2 = ChecklistBox(featuresframe, v[s+s:])
self.featuresEntry2.grid(row=5, column=3, padx=4)
optionsRow = 9
# output options section
ooptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text="Console Options")
ooptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=tk.E+tk.W)
self.wantUART = tk.IntVar()
self.wantUART.set(args.uart)
ttk.Checkbutton(ooptionsSubframe, text="Console over UART", variable=self.wantUART).grid(row=0, column=0, padx=4, sticky=tk.W)
self.wantUSB = tk.IntVar()
self.wantUSB.set(args.usb)
ttk.Checkbutton(ooptionsSubframe, text="Console over USB (Disables other USB use)", variable=self.wantUSB).grid(row=0, column=1, padx=4, sticky=tk.W)
optionsRow += 2
# Code options section
coptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text="Code Options")
coptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=tk.E+tk.W)
self.wantExamples = tk.IntVar()
self.wantExamples.set(args.examples)
ttk.Checkbutton(coptionsSubframe, text="Add examples for Pico library", variable=self.wantExamples).grid(row=0, column=0, padx=4, sticky=tk.W)
self.wantRunFromRAM = tk.IntVar()
self.wantRunFromRAM.set(args.runFromRAM)
ttk.Checkbutton(coptionsSubframe, text="Run from RAM", variable=self.wantRunFromRAM).grid(row=0, column=1, padx=4, sticky=tk.W)
ttk.Button(coptionsSubframe, text="Advanced...", command=self.config).grid(row=0, column=4, sticky=tk.E)
optionsRow += 2
# Build Options section
boptionsSubframe = ttk.LabelFrame(mainFrame, relief=tk.RIDGE, borderwidth=2, text="Build Options")
boptionsSubframe.grid(row=optionsRow, column=0, columnspan=5, rowspan=2, padx=5, pady=5, ipadx=5, ipady=3, sticky=tk.E+tk.W)
self.wantBuild = tk.IntVar()
self.wantBuild.set(args.build)
ttk.Checkbutton(boptionsSubframe, text="Run build", variable=self.wantBuild).grid(row=0, column=0, padx=4, sticky=tk.W)
self.wantOverwrite = tk.IntVar()
self.wantOverwrite.set(args.overwrite)
ttk.Checkbutton(boptionsSubframe, text="Overwrite project", variable=self.wantOverwrite).grid(row=0, column=1, padx=4, sticky=tk.W)
self.wantVSCode = tk.IntVar()
ttk.Checkbutton(boptionsSubframe, text="Create VSCode project", variable=self.wantVSCode).grid(row=0, column=2, padx=4, sticky=tk.W)
optionsRow += 2
# OK, Cancel, Help section
# creating buttons
QuitButton = ttk.Button(mainFrame, text="Quit", command=self.quit).grid(row=optionsRow, column=3, padx=4, pady=5, sticky=tk.E)
OKButton = ttk.Button(mainFrame, text="OK", command=self.OK).grid(row=optionsRow, column=4, stick=tk.E, padx=10, pady=5)
# TODO help not implemented yet
# HelpButton = ttk.Button(mainFrame, text="Help", command=self.help).grid(row=optionsRow, column=0, pady=5)
# You can set a default path here, replace the string with wherever you want.
# self.locationName.set('/home/pi/pico_projects')
def GetFeatures(self):
features = []
f = self.featuresEntry0.getCheckedItems()
f += self.featuresEntry1.getCheckedItems()
f += self.featuresEntry2.getCheckedItems()
for feat in features_list:
if features_list[feat][GUI_TEXT] in f :
features.append(feat)
return features
def quit(self):
# TODO Check if we want to exit here
sys.exit(0)
def OK(self):
# OK, grab all the settings from the page, then call the generators
projectPath = self.locationName.get()
features = self.GetFeatures()
projects = list()
if (self.wantVSCode.get()):
projects.append("vscode")
p = Parameters(self.sdkpath, Path(projectPath), self.projectName.get(), True, self.wantOverwrite.get(), self.wantBuild.get(),\
features, projects, self.configs, self.wantRunFromRAM.get(), \
self.wantExamples.get(),\
self.wantUART.get(), self.wantUSB.get())
DoEverything(self, p)
def browse(self):
name = fd.askdirectory()
self.locationName.set(name)
def help(self):
print("Help TODO")
def config(self):
# Run the configuration window
self.configs = ConfigurationWindow(self, self.configs).get()
def _get_filepath(self, filename):
return os.path.join(os.path.dirname(__file__), filename)
def CheckPrerequisites():
global isMac, isWindows
isMac = (platform.system() == 'Darwin')
isWindows = (platform.system() == 'Windows')
# Do we have a compiler?
return shutil.which(COMPILER_NAME)
def CheckSDKPath(gui):
sdkPath = os.getenv('PICO_SDK_PATH')
if sdkPath == None:
m = 'Unable to locate the Raspberry Pi Pico SDK, PICO_SDK_PATH is not set'
if (gui):
RunWarning(m)
else:
print(m)
elif not os.path.isdir(sdkPath):
m = 'Unable to locate the Raspberry Pi Pico SDK, PICO_SDK_PATH does not point to a directory'
if (gui):
RunWarning(m)
else:
print(m)
sdkPath = None
return sdkPath
def ParseCommandLine():
parser = argparse.ArgumentParser(description='Pico Project generator')
parser.add_argument("name", nargs="?", help="Name of the project")
parser.add_argument("-o", "--output", help="Set an alternative CMakeList.txt filename", default="CMakeLists.txt")
parser.add_argument("-x", "--examples", action='store_true', help="Add example code for the Pico standard library")
parser.add_argument("-l", "--list", action='store_true', help="List available features")
parser.add_argument("-c", "--configs", action='store_true', help="List available project configuration items")
parser.add_argument("-f", "--feature", action='append', help="Add feature to generated project")
parser.add_argument("-over", "--overwrite", action='store_true', help="Overwrite any existing project AND files")
parser.add_argument("-b", "--build", action='store_true', help="Build after project created")
parser.add_argument("-g", "--gui", action='store_true', help="Run a GUI version of the project generator")
parser.add_argument("-p", "--project", action='append', help="Generate projects files for IDE. Options are: vscode")
parser.add_argument("-r", "--runFromRAM", action='store_true', help="Run the program from RAM rather than flash")
parser.add_argument("-uart", "--uart", action='store_true', default=1, help="Console output to UART (default)")
parser.add_argument("-usb", "--usb", action='store_true', help="Console output to USB (disables other USB functionality")
return parser.parse_args()
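# Illustrative invocation (flags per the parser above; the project name and features are
# placeholders). Assumes PICO_SDK_PATH is exported and arm-none-eabi-gcc is on the PATH:
#     python3 pico_project.py my_project -f spi -f i2c -p vscode --build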
def GenerateMain(folder, projectName, features):
filename = Path(folder) / (projectName + '.c')
file = open(filename, 'w')
main = ('#include <stdio.h>\n'
'#include "pico/stdlib.h"\n'
)
file.write(main)
if (features):
# Add any includes
for feat in features:
if (feat in features_list):
o = '#include "' + features_list[feat][H_FILE] + '"\n'
file.write(o)
if (feat in stdlib_examples_list):
o = '#include "' + stdlib_examples_list[feat][H_FILE] + '"\n'
file.write(o)
file.write('\n')
# Add any defines
for feat in features:
if (feat in code_fragments_per_feature):
for s in code_fragments_per_feature[feat][DEFINES]:
file.write(s)
file.write('\n')
file.write('\n')
main = ('\n\n'
'int main()\n'
'{\n'
' stdio_init_all();\n\n'
)
if (features):
# Add any initialisers
indent = 4
for feat in features:
if (feat in code_fragments_per_feature):
for s in code_fragments_per_feature[feat][INITIALISERS]:
main += (" " * indent)
main += s
main += '\n'
main += '\n'
main += (' puts("Hello, world!");\n\n'
' return 0;\n'
'}\n'
)
file.write(main)
file.close()
def GenerateCMake(folder, params):
cmake_header1 = ("# Generated Cmake Pico project file\n\n"
"cmake_minimum_required(VERSION 3.12)\n\n"
"set(CMAKE_C_STANDARD 11)\n"
"set(CMAKE_CXX_STANDARD 17)\n\n"
"# initalize pico_sdk from installed location\n"
"# (note this can come from environment, CMake cache etc)\n"
)
cmake_header2 = ("# Pull in Pico SDK (must be before project)\n"
"include(pico_sdk_import.cmake)\n\n"
)
cmake_header3 = (
"# Initialise the Pico SDK\n"
"pico_sdk_init()\n\n"
"# Add executable. Default name is the project name, version 0.1\n\n"
)
filename = Path(folder) / CMAKELIST_FILENAME
file = open(filename, 'w')
file.write(cmake_header1)
# OK, for the path, CMake will accept forward slashes on Windows, and that's
# seemingly a bit easier to handle than the backslashes
p = str(params.sdkPath).replace('\\','/')
p = '\"' + p + '\"'
file.write('set(PICO_SDK_PATH ' + p + ')\n\n')
file.write(cmake_header2)
file.write('project(' + params.projectName + ' C CXX)\n\n')
file.write(cmake_header3)
# add the preprocessor defines for overall configuration
if params.configs:
file.write('# Add any PICO_CONFIG entries specified in the Advanced settings\n')
for c, v in params.configs.items():
file.write('add_compile_definitions(-D' + c + '=' + v + ')\n')
file.write('\n')
# No GUI/command line to set a different executable name at this stage
executableName = params.projectName
file.write('add_executable(' + params.projectName + ' ' + params.projectName + '.c )\n\n')
file.write('pico_set_program_name(' + params.projectName + ' "' + executableName + '")\n')
file.write('pico_set_program_version(' + params.projectName + ' "0.1")\n\n')
if params.wantRunFromRAM:
file.write('# no_flash means the target is to run from RAM\n')
file.write('pico_set_binary_type(' + params.projectName + ' no_flash)\n\n')
# Console output destinations
if params.wantUART:
file.write('pico_enable_stdio_uart(' + params.projectName + ' 1)\n')
else:
file.write('pico_enable_stdio_uart(' + params.projectName + ' 0)\n')
if params.wantUSB:
file.write('pico_enable_stdio_usb(' + params.projectName + ' 1)\n\n')
else:
file.write('pico_enable_stdio_usb(' + params.projectName + ' 0)\n\n')
# Standard libraries
file.write('# Add the standard library to the build\n')
file.write('target_link_libraries(' + params.projectName + ' ' + STANDARD_LIBRARIES + ')\n\n')
# Selected libraries/features
if (params.features):
file.write('# Add any user requested libraries\n')
file.write('target_link_libraries(' + params.projectName + '\n')
for feat in params.features:
if (feat in features_list):
file.write(" " + features_list[feat][LIB_NAME] + '\n')
file.write(' )\n\n')
file.write('pico_add_extra_outputs(' + params.projectName + ')\n\n')
file.close()
# Generates the requested project files, if any
def generateProjectFiles(projectPath, projectName, sdkPath, projects):
oldCWD = os.getcwd()
os.chdir(projectPath)
for p in projects :
if p == 'vscode':
v1 = ('{\n'
' // Use IntelliSense to learn about possible attributes.\n'
' // Hover to view descriptions of existing attributes.\n'
' // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n'
' "version": "0.2.0",\n'
' "configurations": [\n'
' {\n'
' "name": "Cortex Debug",\n'
' "cwd": "${workspaceRoot}",\n'
' "executable": "${workspaceRoot}/build/' + projectName + '.elf",\n'
' "request": "launch",\n'
' "type": "cortex-debug",\n'
' "servertype": "openocd",\n'
' "device": "Pico2040",\n'
' "configFiles": [\n' + \
' "interface/raspberrypi-swd.cfg",\n' + \
' "target/rp2040.cfg"\n' + \
' ],\n' + \
' "svdFile": "' + str(sdkPath) + '/src/rp2040/hardware_regs/rp2040.svd",\n'
' "runToMain": true,\n'
' }\n'
' ]\n'
'}\n')
c1 = ('{\n'
' "configurations": [\n'
' {\n'
' "name": "Linux",\n'
' "includePath": [\n'
' "${workspaceFolder}/**",\n'
' "${env:PICO_SDK_PATH}/**"\n'
' ],\n'
' "defines": [],\n'
' "compilerPath": "/usr/bin/arm-none-eabi-gcc",\n'
' "cStandard": "gnu17",\n'
' "cppStandard": "gnu++14",\n'
' "intelliSenseMode": "gcc-arm"\n'
' }\n'
' ],\n'
' "version": 4\n'
'}\n')
s1 = ( '{\n'
' "cmake.configureOnOpen": false,\n'
' "cmake.statusbar.advanced": {\n'
' "debug" : {\n'
' "visibility": "hidden"\n'
' },'
' "launch" : {\n'
' "visibility": "hidden"\n'
' },\n'
' "build" : {\n'
' "visibility": "hidden"\n'
' },\n'
' "buildTarget" : {\n'
' "visibility": "hidden"\n'
' },\n'
' },\n'
'}\n')
# Create the .vscode folder and write the project files into it
if not os.path.exists(VSCODE_FOLDER):
os.mkdir(VSCODE_FOLDER)
os.chdir(VSCODE_FOLDER)
filename = VSCODE_LAUNCH_FILENAME
file = open(filename, 'w')
file.write(v1)
file.close()
file = open(VSCODE_C_PROPERTIES_FILENAME, 'w')
file.write(c1)
file.close()
file = open(VSCODE_SETTINGS_FILENAME, 'w')
file.write(s1)
file.close()
else :
print('Unknown project type requested')
os.chdir(oldCWD)
def LoadConfigurations():
try:
with open("pico_configs.tsv") as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
for row in reader:
configuration_dictionary.append(row)
except:
print("No Pico configurations file found. Continuing without")
def DoEverything(parent, params):
if not os.path.exists(params.projectRoot):
if params.wantGUI:
mb.showerror('Raspberry Pi Pico Project Generator', 'Invalid project path. Select a valid path and try again')
return
else:
print('Invalid project path')
sys.exit(-1)
oldCWD = os.getcwd()
os.chdir(params.projectRoot)
# Create our project folder as subfolder
os.makedirs(params.projectName, exist_ok=True)
os.chdir(params.projectName)
projectPath = params.projectRoot / params.projectName
# First check if there is already a project in the folder
    # If there is, we abort unless the overwrite flag is set
if os.path.exists(CMAKELIST_FILENAME):
if not params.wantOverwrite :
if params.wantGUI:
# We can ask the user if they want to overwrite
                y = mb.askquestion('Raspberry Pi Pico Project Generator', 'There already appears to be a project in this folder. \nPress Yes to overwrite the project files, or No to choose another folder')
if y != 'yes':
return
else:
print('There already appears to be a project in this folder. Use the --overwrite option to overwrite the existing project')
sys.exit(-1)
# We should really confirm the user wants to overwrite
#print('Are you sure you want to overwrite the existing project files? (y/N)')
#c = input().split(" ")[0]
#if c != 'y' and c != 'Y' :
# sys.exit(0)
# Copy the SDK finder cmake file to our project folder
# Can be found here <PICO_SDK_PATH>/external/pico_sdk_import.cmake
shutil.copyfile(params.sdkPath / 'external' / 'pico_sdk_import.cmake', projectPath / 'pico_sdk_import.cmake' )
if params.features:
features_and_examples = params.features[:]
else:
features_and_examples= []
if params.wantExamples:
features_and_examples = list(stdlib_examples_list.keys()) + features_and_examples
GenerateMain('.', params.projectName, features_and_examples)
GenerateCMake('.', params)
# Create a build folder, and run our cmake project build from it
if not os.path.exists('build'):
os.mkdir('build')
os.chdir('build')
cpus = os.cpu_count()
    if cpus is None:
cpus = 1
if isWindows:
cmakeCmd = 'cmake -DCMAKE_BUILD_TYPE=Debug -G "NMake Makefiles" ..'
makeCmd = 'nmake -j ' + str(cpus)
else:
cmakeCmd = 'cmake -DCMAKE_BUILD_TYPE=Debug ..'
makeCmd = 'make -j' + str(cpus)
if params.wantGUI:
RunCommandInWindow(parent, cmakeCmd)
else:
os.system(cmakeCmd)
if params.projects:
generateProjectFiles(projectPath, params.projectName, params.sdkPath, params.projects)
if params.wantBuild:
if params.wantGUI:
RunCommandInWindow(parent, makeCmd)
else:
os.system(makeCmd)
print('\nIf the application has built correctly, you can now transfer it to the Raspberry Pi Pico board')
os.chdir(oldCWD)
###################################################################################
# main execution starteth here
args = ParseCommandLine()
# Check we have everything we need to compile etc
c = CheckPrerequisites()
## TODO Do both warnings in the same error message so the user doesn't have to keep coming back to find still more to do
if c is None:
m = 'Unable to find the `' + COMPILER_NAME + '` compiler\n'
m +='You will need to install an appropriate compiler to build a Raspberry Pi Pico project\n'
m += 'See the Raspberry Pi Pico documentation for how to do this on your particular platform\n'
if (args.gui):
RunWarning(m)
else:
print(m)
sys.exit(-1)
if args.name is None and not args.gui and not args.list and not args.configs:
    print("No project name specified\n")
sys.exit(-1)
# load/parse any configuration dictionary we may have
LoadConfigurations()
p = CheckSDKPath(args.gui)
if p is None:
sys.exit(-1)
sdkPath = Path(p)
if args.gui:
RunGUI(sdkPath, args) # does not return, only exits
projectRoot = Path(os.getcwd())
if args.list or args.configs:
if args.list:
print("Available project features:\n")
for feat in features_list:
print(feat.ljust(6), '\t', features_list[feat][GUI_TEXT])
print('\n')
if args.configs:
print("Available project configuration items:\n")
for conf in configuration_dictionary:
print(conf['name'].ljust(40), '\t', conf['description'])
print('\n')
sys.exit(0)
else :
p = Parameters(sdkPath, projectRoot, args.name, False, args.overwrite, args.build, args.feature, args.project, (), args.runFromRAM, args.examples, args.uart, args.usb)
DoEverything(None, p)
|
views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_protect
from .models import *
import json
import threading
import time
import os
import subprocess
import shutil
from shutil import copyfile
from datetime import date, datetime
import asyncio
import media.Hocr_Functionality as hocrf
import pandas as pd
from bs4 import BeautifulSoup
import cv2
import csv
#================ Login Check =====================
def login(request):
return render(request,"login.html")
def login_check(request):
name = request.POST['name']
pswd = request.POST['pswd']
if name == "user@abt.com" and pswd == "user":
print("Succes User")
return HttpResponse(json.dumps("Suceess_user"), content_type="application/json")
elif name == "admin@mediast.com" and pswd == "admin":
return HttpResponse(json.dumps("Suceess_process"), content_type="application/json")
else:
return HttpResponse(json.dumps("unSuceess"), content_type="application/json")
## Move images from the input location to the project directory and display them.
PInputaddress = r"D:\OCR\Helix Enterprise Engine v1.0\static\tempdata"
## Dictionary Excel address
DictFile = r"D:\OCR\Helix Enterprise Engine v1.0\Dictonary.xlsx"
## Excel output file
ExcelOutputFile = r"output.xlsx"
##Excel Label Data
ExcelLabelData = r"D:\OCR\Helix Enterprise Engine v1.0\media\1library.csv"
## ImageMagick executable address
ImageMagickAddress = r"C:\Program Files\ImageMagick-7.0.9-Q16\convert.exe"
#================= queue =========================
def copytree(src,i, dst = PInputaddress, symlinks=False, ignore=None):
if os.path.isdir(dst+"\\"+i.rstrip(".pdf")) == False:
os.mkdir(dst+"\\"+i.rstrip(".pdf"))
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst + "\\" + i.rstrip(".pdf"), item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
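# Example usage (mirrors the call in ocrconverionengine() further down): copy a processed
# document's output folder into the static display area, keyed by the PDF's base name:
#
#   copytree(ofname + "\\" + i.rstrip(".pdf"), i)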
def queue(request):
queuedata = viewdatafilepath.objects.all()
return render(request,"queue.html",{'queuedata':queuedata})
#================ Training Model ================
# def Training(request,id):
# return render(request,"TrainingEngine.html",{'id':id})
def Training(request,id):
return render(request,"TrainingEngine-V12.html",{'id':id})
def LalbelData(request):
queuedata = labeldata.objects.all()
SendLabel = []
for q in queuedata:
SendLabel.append(q.label)
print("asdsdsd",SendLabel)
sdata = { 'Labels' : SendLabel}
return HttpResponse(json.dumps(sdata), content_type="application/json")
def FindListOfHeaders(request):
f = open(r"D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv", "r")
reader = csv.reader(f)
headers = next(reader,None)
f.close()
sdata = { 'Labels' : headers}
return HttpResponse(json.dumps(sdata), content_type="application/json")
def FindSubList(request):
sublabel = request.POST['sublabel']
f = open(r"D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv", "r")
reader = csv.reader(f)
headers = next(reader,None)
f.close()
df = pd.read_csv(r"D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv", usecols=headers)
incoms = df[sublabel].values.tolist()
incoms = [incom for incom in incoms if str(incom) != 'nan']
sdata = { 'sublabels' : incoms}
return HttpResponse(json.dumps(sdata), content_type="application/json")
def PushValuesToCSV(request):
ALabelsArray = request.POST['ALabelsArray']
ASubLabelArray = request.POST['ASubLabelArray']
ANewLabelArray = request.POST['ANewLabelArray']
ALabelsArray = json.loads(ALabelsArray)
ASubLabelArray = json.loads(ASubLabelArray)
ANewLabelArray = json.loads(ANewLabelArray)
print(ALabelsArray,ASubLabelArray,ANewLabelArray)
f = open(r"D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv", "r")
reader = csv.reader(f)
headers = next(reader,None)
f.close()
df = pd.read_csv(r"D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv", usecols=headers)
i = 0
for La in ALabelsArray:
if La == "NewLabel":
df[ANewLabelArray[i]] = ''
            df.at[len(df), ANewLabelArray[i]] = ASubLabelArray[i]  # DataFrame.set_value() was removed in pandas 1.0; .at sets with enlargement
        if ASubLabelArray[i] == "NewSub":
            df.at[len(df), La] = ANewLabelArray[i]
i += 1
df.to_csv(r'D:\OCR\Helix Enterprise Engine v1.0\media\1library-1-Mapping.csv',index=False)
# df.to_excel(r'D:\OCR_WEB_DEVELOPEMENT\Helix_OCR\media\Temp.csv',index=False)
return HttpResponse(json.dumps("sdata"), content_type="application/json")
def PushLables(request):
# ALabelArray = request.POST['ALabelArray']
# ASepArray = request.POST['ASepArray']
# ALabelArray = json.loads(ALabelArray)
# ASepArray = json.loads(ASepArray)
# print(ALabelArray)
# writer = pd.ExcelWriter(ExcelLabelData, engine='xlsxwriter')
# df2 = pd.DataFrame(ALabelArray)
# # df2 = df2.transpose()
# df2.to_excel(writer,header=False,index=False)
# writer.save()
# with open(ExcelLabelData, 'a') as file:
# writer = csv.writer(file)
# try:
# for La in ALabelArray:
# writer.writerow(['']+['']+['']+[La])
# except Exception as e:
# print(e)
filename = request.POST['filename']
# print(PInputaddress + "\\"+ filename + ".pdf")
try:
if os.path.isfile(PInputaddress + "\\"+ filename +"\\"+filename + ".hocr"):
print("====== engine funcion called =======")
extarctedoututdata = hocrf.mainfunction(PInputaddress + "\\"+ filename +"\\"+filename + ".hocr")
print("====== engine funcion ended =======")
#print(extarctedoututdata)
testd = viewdatafilepath.objects.filter(filename=filename).update(EngineData=extarctedoututdata)
print("======== Data Saved ================")
except Exception as e:
print(e)
return HttpResponse(json.dumps("rjdata"), content_type="application/json")
#================ data view ======================
def dataview(request,id):
#pippin = view_data_filepath.objects.create(filepath='Peregrin Took',EngineData=['apples', 'lembas bread', 'potatoes',['t','j']])
#pippin.save()
# print(id)
return render(request,"data_view.html",{'id':id})
def retivedatafromdb(request):
rid = request.POST['rid']
rdata = viewdatafilepath.objects.filter(dataid = rid)
for r in rdata:
finalarraytojson = TextToArray(r.EngineData)
filaname = r.filename
filepath = PInputaddress
listoffilename = []
for r, d, f in os.walk(filepath + "\\" + filaname.rstrip(".pdf") ):
for file in f:
if '.png' in file:
listoffilename.append(file)
    fileNames = []
    if len(listoffilename) == 1:
        fileNames.append(listoffilename[0])
    elif len(listoffilename) > 1:
        # multi-page documents were rendered as numbered PNGs: <name>-0.png, <name>-1.png, ...
        fileName = filaname.rstrip(".pdf")
        for i in range(len(listoffilename)):
            fileNames.append(fileName + "-" + str(i) + ".png")
    # if no PNGs were found there is nothing to display and fileNames stays empty
    # tempdataarr = [...]  # large commented-out sample of extracted OCR label/table data, truncated here
#for r in rdata:
# rjdata = {'edata' : r.EngineData,'totalpages':r.totalpages,'dataextracted': r.dataextracted,'dataaccuracy':r.dataaccuracy,'noisepercentage':r.noisepercentage,'filename':r.filename,'totalpages':r.totalpages}
rjdata = {'edata' : finalarraytojson,'imagefilename' : fileNames, 'filename' : filaname,'totalnumberofpages':len(listoffilename)}
return HttpResponse(json.dumps(rjdata), content_type="application/json")
def comparevalue(request):
particulaval = request.POST['particulaval']
amountval = request.POST['amountval']
particulaval = json.loads(particulaval)
amountval = json.loads(amountval)
# print("particulaval",particulaval)
# print("amountval",amountval)
data = pd.read_excel(DictFile)
df = pd.DataFrame(data, columns= ['Particulars','Amount'])
listpro = df.values.tolist()
#print(listpro)
val = 1
comparedal = [None] * len(particulaval)
# print(len(particulaval))
for k in range(len(particulaval)):
for j in listpro:
if str(particulaval[k]) == j[0]:
try:
#print(float(amountval[k].strip()),j[1])
#print(float(amountval[k].strip()) - j[1])
comparedal[k] = float(amountval[k].strip()) - float(j[1])
break
                except Exception:
                    pass  # skip rows where the amount cannot be parsed as a number
# print(comparedal)
rjdata_Dat = {'comparedval' : comparedal }
return HttpResponse(json.dumps(rjdata_Dat), content_type="application/json")
def updatexlxs(request):
updateparticularval = request.POST['updateparticularval']
updateamountval = request.POST['updateamountval']
print(updateparticularval)
print(updateamountval)
df = pd.DataFrame({"Particulars":[updateparticularval], "Amount":[updateamountval]})
append_df_to_excel(df, DictFile)
return HttpResponse(json.dumps("saved"), content_type="application/json")
def insertvaluetoexcel(request):
sendHeaders = request.POST['sendHeaders']
sendHeaders = json.loads(sendHeaders)
sendData = request.POST['sendData']
sendData = json.loads(sendData)
sendtabledata = request.POST['sendtabledata']
sendtabledata = json.loads(sendtabledata)
# print(sendtabledata[0])
writer = pd.ExcelWriter(ExcelOutputFile, engine='xlsxwriter')
i = 1
for t in sendtabledata:
df2 = pd.DataFrame(t)
# df2 = df2.transpose()
df2.to_excel(writer, sheet_name='Table Data - '+str(i),header=False,index=False)
workbook1 = writer.book
worksheet1 = writer.sheets['Table Data - '+str(i)]
font_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10})
header_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10, 'bold': True})
worksheet1.set_column('A:A', None, font_fmt)
worksheet1.set_row(0, None, header_fmt)
i += 1
i = 1
for a in sendHeaders:
df2 = pd.DataFrame(a)
df2 = df2.transpose()
df2.to_excel(writer, sheet_name='Extracted Data - '+str(i),header=False,index=False)
workbook1 = writer.book
worksheet1 = writer.sheets['Extracted Data - '+str(i)]
font_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10})
header_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10, 'bold': True})
worksheet1.set_column('A:A', None, font_fmt)
worksheet1.set_row(0, None, header_fmt)
i += 1
i = 1
for a in sendData:
df2 = pd.DataFrame(a)
df2 = df2.transpose()
df2.to_excel(writer, sheet_name='Extracted Data - '+str(i),header=False,startrow=1,index=False)
workbook1 = writer.book
worksheet1 = writer.sheets['Extracted Data - '+str(i)]
font_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10})
header_fmt = workbook1.add_format({'font_name': 'Arial', 'font_size': 10, 'bold': True})
worksheet1.set_column('A:A', None, font_fmt)
worksheet1.set_row(0, None, header_fmt)
i += 1
writer.save()
return HttpResponse(json.dumps("saved"), content_type="application/json")
def append_df_to_excel(df, excel_path):
df_excel = pd.read_excel(excel_path)
result = pd.concat([df_excel, df], ignore_index=True)
result.to_excel(excel_path, index=False)
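# Minimal usage sketch (this mirrors what updatexlxs() above does): append one row of
# Particulars/Amount to the dictionary workbook. The values below are made up for
# illustration only.
#
#   append_df_to_excel(pd.DataFrame({"Particulars": ["X-Ray"], "Amount": [450]}), DictFile)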
def TextToArray(AllPageText):
EachPageArray = AllPageText.split("%%%%%%")
FinalLableAndTableForAllPageArray = []
for EachPage in EachPageArray:
PageLableData = []
PageTableData = []
PageLableTableArray = EachPage.split("$$$$$$")
if(len(PageLableTableArray)>0):
LableArray = PageLableTableArray[0].split("######")
for eachLableGroup in LableArray:
eachLableRow = eachLableGroup.split("@@@@@@")
lableRow = []
seperatorRow = []
valueRow = []
if(len(eachLableRow)>0):
lableRow = eachLableRow[0].split("!!!!!!")
if(len(eachLableRow)>1):
seperatorRow = eachLableRow[1].split("!!!!!!")
if(len(eachLableRow)>2):
valueRow = eachLableRow[2].split("!!!!!!")
PageLableData.append([lableRow,seperatorRow,valueRow])
if(len(PageLableTableArray)>1):
TableArray = PageLableTableArray[1].split("^^^^^^")
CurrentPageTableData = []
for eachTable in TableArray:
AlltablesInEachPageArray =eachTable.split("######")
for eachTableGroup in AlltablesInEachPageArray:
eachTableRow = eachTableGroup.split("@@@@@@")
TableDataValue = []
TableDataCoordinates = []
if(len(eachTableRow)>0):
TableDataValue = eachTableRow[0].split("!!!!!!")
if(len(eachTableRow)>1):
TableDataCoordinates = eachTableRow[1].split("!!!!!!")
CurrentPageTableData.append([TableDataValue,TableDataCoordinates])
PageTableData.append(CurrentPageTableData)
CurrentPageTableData=[]
FinalLableAndTableForAllPageArray.append([PageLableData,PageTableData])
return FinalLableAndTableForAllPageArray
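# TextToArray() expects EngineData to be one flat string using nested delimiters. The
# following summary is inferred from the splits above (a description of the parsing
# logic, not a formal spec):
#
#   %%%%%%  separates pages
#   $$$$$$  separates the label section from the table section within a page
#   ^^^^^^  separates individual tables within the table section
#   ######  separates groups (label groups / table groups)
#   @@@@@@  separates the rows inside a group (labels / separators / values,
#           or table values / table coordinates)
#   !!!!!!  separates the cells inside a row
#
# A toy single-page payload could therefore look like (illustrative only):
#   "Name!!!!!!Date@@@@@@:!!!!!!:@@@@@@John!!!!!!01-01-2020$$$$$$cell1!!!!!!cell2@@@@@@10!!!!!!20"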
#================= Tasks ===========================
def user_process_new_task(request):
datat = tabletask.objects.all()
return render(request,"user_process_new.html",{'datat': datat})
def vaidate_file(request):
fname = request.POST[r'fname']
if os.path.isdir(fname) == True:
return HttpResponse(json.dumps("Suceess"), content_type="application/json")
else:
return HttpResponse(json.dumps("unSuceess"), content_type="application/json")
def processinput(request):
ifname = request.POST[r'ifname']
ofname = request.POST[r'ofname']
oename = request.POST[r'oename']
pname = request.POST[r'pname']
print(ifname,ofname,oename)
listofpdf = []
exe = ImageMagickAddress
for r, d, f in os.walk(ifname):
for file in f:
if '.pdf' in file:
listofpdf.append(file)
for i in listofpdf:
os.rename(ifname+"\\" + i,ifname+"\\" + i.replace(" ","__"))
listofpdf = []
for r, d, f in os.walk(ifname):
for file in f:
if '.pdf' in file:
listofpdf.append(file)
print(listofpdf)
entertaskdetailstodb(ifname,ofname,pname,oename,listofpdf)
jdata = {
'listoffile' : listofpdf
}
return HttpResponse(json.dumps(jdata), content_type="application/json")
def entertaskdetailstodb(ifname,ofname,pname,oename,listofpdf):
today = date.today()
now = datetime.now()
current_time = str(today) + " " + str(now.strftime("%H:%M:%S"))
#print("Today's date:", today,current_time)
datatoenter = tabletask(timestamp = current_time,inputfilelocation = ifname,outputfilelocatin= ofname,processedfilelocation = pname,excelfilename = oename,totalnumberoffiles =len(listofpdf))
datatoenter.save()
ocrconverionengine(current_time,ifname,ofname,pname,oename,listofpdf)
def ocrconverionengine(current_time,ifname,ofname,pname,oename,listofpdf):
"""
listofpdf = []
for r, d, f in os.walk(ifname):
for file in f:
if '.pdf' in file:
listofpdf.append(file)
"""
exe = "C:\Program Files\ImageMagick-7.0.9-Q16\convert.exe"
for i in listofpdf:
if os.path.isdir(ofname+"\\"+i.rstrip(".pdf")) == False:
os.mkdir(ofname+"\\"+i.rstrip(".pdf"))
cmd1 = exe +' -strip -alpha off -density 300 "' + ifname + "\\"+i + '" -depth 2 -quality 300 "' + ofname + "\\" + i.rstrip(".pdf") + "\\" + i.rstrip(".pdf")+'.tiff"'
subprocess.call(cmd1)
print("======= pdf to tiff conversion ========")
cmd = exe +' -density 250 "' + ifname + "\\"+i + '" -quality 300 "' + ofname + "\\" + i.rstrip(".pdf") + "\\" + i.rstrip(".pdf")+'.png"'
#print(cmd)
subprocess.call(cmd)
print("======== pdf to png conversion =========")
shutil.move(ifname + "\\"+i,pname + "\\" + i)
#print(ofname+"\\"+i.rstrip(".pdf")+"\\"+i.replace(".pdf",".tiff"))
if os.path.exists(ofname+"\\"+i.rstrip(".pdf")+"\\"+i.replace(".pdf",".tiff")) == True:
print("enter")
test_cmd = "tesseract " + ofname+"\\"+i.rstrip(".pdf")+"\\"+i.replace(".pdf",".tiff") +" "+ ofname +"\\"+i.rstrip(".pdf")+"\\"+i.rstrip(".pdf") + " --dpi 300 --psm 3 hocr"
print("=== tesseract command=====::",test_cmd)
subprocess.call(test_cmd)
print("====== engine funcion called =======")
extarctedoututdata = hocrf.mainfunction(ofname +"\\"+i.rstrip(".pdf")+"\\"+i.replace(".pdf",".hocr"))
print("====== engine funcion ended =======")
#print(extarctedoututdata)
testd = viewdatafilepath(timestamp = current_time,inputfilelocation = ofname +"\\"+i.rstrip(".pdf"),filename = i.rstrip(".pdf"),EngineData = extarctedoututdata, dataextracted= 86,dataaccuracy = 96,noisepercentage = 12,totalpages = len(listofpdf))
testd.save()
print("======== Data Saved ================")
#print(i)
copytree(ofname+"\\" + i.rstrip(".pdf"),i)
def div_ocr_page_extract_hihg(div_p_word):
div_ocr_page_id = []
div_ocr_page_cordinate = []
div_ocr_page_data = []
return_array_page = []
div_p_word = div_p_word.find_all('div',class_='ocr_page')
for word in div_p_word:
div_ocr_page_id.append(word.get('id'))
div_temp = word.get('title').split(";")[1].split(" ")
div_ocr_page_cordinate.append([int(div_temp[2]),int(div_temp[3]),int(div_temp[4]),int(div_temp[5])])
div_ocr_page_data.append(word)
# print("------------------")
# print(div_ocr_page_id)
return_array_page.append([div_ocr_page_id,div_ocr_page_cordinate,div_ocr_page_data])
# print("length",len(return_array_page[0][2]))
return div_ocr_page_data
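# The title-attribute parsing above assumes standard hOCR page elements, where an
# ocr_page div carries something like (illustrative; the whitespace matters for the
# split(";")[1].split(" ") indexing):
#
#   <div class='ocr_page' id='page_1' title='image "scan.png"; bbox 0 0 2480 3509; ppageno 0'>
#
# i.e. the second ';'-separated field is " bbox x0 y0 x1 y1", so indices 2..5 of the
# space-split are the four page coordinates.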
def ocrfunctionalitytest(request):
temp = "fas"
tst = json.dumps(temp)
current_time = "1/7/2020"
ofname = "D:/mediaeast/Testing/output/50272"
fiename = "50272"
fname = r"D:\mediaeast\Testing\5045.pdf"
#tt = [[1,2,3,4],[1,2,3,'']]
#testd = testtable(tid = 1, myfile = fname)
#testd = viewdatafilepath(timestamp = current_time,inputfilelocation = ofname ,filename = fiename,EngineData = temp, dataextracted= 86,dataaccuracy = 96,noisepercentage = 12,totalpages = len(temp[0]))
#testd.save()
a = ""
rdata = testtable.objects.all()
for r in rdata:
a = r.myfile
return HttpResponse(json.dumps(a), content_type="application/json")
def testingocrfunction(request):
"""
fil = r"D:\mediaeast\Testing\output\test\test.hocr"
extarctedoututdata = hocrf.mainfunction(fil)
print(extarctedoututdata)
testd = view_data_filepath(EngineData = extarctedoututdata)
testd.save()
extarctedoututdata = [['a',1,2,3,4],['b',4,5,6,7]]
testd = view_data_filepath(EngineData = extarctedoututdata)
testd.save()
datat = view_data_filepath.objects.all()
for d in datat:
for i in d.EngineData:
for j in i:
print(j)
"""
ofname = "D:\mediaeast\Testing\output"
i = "ADITYA__B__FINAL__BILL"
    copytree(ofname + "\\" + i.rstrip(".pdf"), i)  # copytree() requires the file name as its second argument
return HttpResponse(json.dumps("test"), content_type="application/json")
def loadtaskdata(request):
returndata = []
datat = tabletask.objects.all()
async def test():
loop = asyncio.get_event_loop()
await asyncio.sleep(3)
print("test called")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
loop.close()
"""
for d in datat:
returndata.append([d.taskid,d.timestamp,d.inputfilelocation,d.outputfilelocatin,d.processedfilelocation,d.excelfilename,d.totalnumberoffiles])
"""
jdata1 = {
'tabledata' : returndata
}
return HttpResponse(json.dumps(jdata1), content_type="application/json")
#================= User Process ====================
def user_details(request):
t_id = request.POST['t_id']
t_name = request.POST['t_name']
t_desc = request.POST['t_desc']
template_details = request.POST['templates_details']
input_filename = request.POST['input_file_name']
output_filename = request.POST['output_file_name']
file_name_pattern = request.POST['file_name_pattern']
task_details = task_creation(task_id = t_id, task_name = t_name, task_desc = t_desc, template_id =template_details,input_file_name = input_filename,output_file_name = output_filename,file_name_pattern = file_name_pattern)
task_creation.save(task_details)
#task_operation.check_task_status()
#insert_data_todb.insert_data_val()
#task_operation.arr_clear()
return HttpResponse(json.dumps("Created"), content_type="application/json")
def list_of_task(request):
list_task = task_creation.objects.all()
print(list_task)
return render(request,"user_process_view.html",{'list_task':list_task})
#================== Model Function ===================
input_filename = []
output_filename = []
t1 = ""
def Find_number_of_files(request):
global t1
t1 = threading.Thread(target=test_function)
t1.start()
return HttpResponse(json.dumps("Created"), content_type="application/json")
def test_function():
task_s = task_creation.objects.filter(status="Pending")
for t in task_s:
input_filename.append(t.input_file_name)
output_filename.append(t.output_file_name)
print(input_filename,output_filename)
    time.sleep(5)
    # this function runs on the worker thread t1 itself, so it must not join t1
    # (a thread cannot join itself); it simply finishes after clearing the shared lists
    input_filename.clear()
    output_filename.clear()
|
client.py
|
"""
Plain TCP socket client.
"""
from socket import AF_INET, SOCK_STREAM, socket
from subprocess import call
from sys import argv
from sys import exit as sys_exit
from threading import Thread
from colorama import Fore
END = "END"
def main() -> None:
"""
main function of the program.
"""
port: int = get_safe_port()
connect_to_server(port)
def color_print(color: str, *message: str) -> None:
"""
Print a message with a specific color and go back to default.
"""
print(color + " ".join(message) + Fore.RESET)
def error(message: str) -> None:
"""
Print an error message in red color and exit the program.
"""
color_print(Fore.RED, message)
sys_exit(1)
def okay(message: str) -> None:
"""
Print a green message for indicating successful process.
"""
color_print(Fore.GREEN, message)
def get_safe_port() -> int:
"""
    Get the port from the command line arguments, checking that it is valid.
"""
if len(argv) < 2:
error("Usage: make run port=<port>")
try:
return int(argv[1])
except ValueError:
error(f'Invalid port "{argv[1]}"')
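# Usage sketch: the port is the first positional argument, so (assuming the file is run
# directly rather than through the Makefile target mentioned in the usage message)
#
#   python client.py 5000
#
# would attempt to connect to a server listening locally on port 5000.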
def connect_to_server(port: int) -> None:
"""
    Connect to a TCP server on localhost at the given port.
"""
sock = socket(AF_INET, SOCK_STREAM)
try:
sock.connect(("0.0.0.0", port))
except ConnectionRefusedError:
error("Unable to connect with server")
call("clear")
okay(f"Connected to {sock.getpeername()}\n")
manage_messages(sock)
def manage_messages(sock: socket) -> None:
"""
Control the message flow with threads.
"""
recv_thread = Thread(target=receive_data, args=(sock,))
send_thread = Thread(target=send_message, args=(sock,))
recv_thread.start()
send_thread.start()
def send_message(sock: socket) -> None:
"""
Send a message to the server with socket.
"""
print(f"Type for sending a message or type {END} for exit.\n")
while True:
message = input()
sock.send(message.encode())
if message.upper() == END:
sock.close()
sys_exit(0)
def receive_data(sock: socket) -> None:
"""
Receive data from the server.
"""
while True:
data = sock.recv(1024)
if not data:
break
color_print(Fore.CYAN, data.decode())
if __name__ == "__main__":
main()
|
app.py
|
#!/usr/bin/env python
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
import importlib
import json
import urllib
import uuid
from datetime import datetime
from pprint import pprint
from warnings import warn
import jwt
import requests
from flask import Flask, Response, redirect, render_template, request, send_from_directory, session
from flask_executor import Executor
from flask_oidc import OpenIDConnect
from flask_session import Session
from flask_socketio import SocketIO, emit
from pyecore.ecore import EDate
import src.esdl_config as esdl_config
import src.settings as settings
from esdl import esdl
from esdl.esdl_handler import EnergySystemHandler
from esdl.processing import ESDLAsset, ESDLEcore, ESDLEnergySystem, ESDLGeometry, ESDLQuantityAndUnits
from esdl.processing.ESDLAsset import get_asset_capability_type
from esdl.processing.EcoreDocumentation import EcoreDocumentation
from extensions.bag import BAG
from extensions.boundary_service import BoundaryService
from extensions.es_statistics import ESStatisticsService
from extensions.esdl_api import ESDL_API
from extensions.esdl_browser import ESDLBrowser
from extensions.esdl_compare import ESDLCompare
from extensions.esdl_drive import ESDLDrive
from extensions.esdl_merge import ESDLMerge
from extensions.essim import ESSIM
from extensions.essim_sensitivity import ESSIMSensitivity
from extensions.etm_local import ETMLocal
from extensions.heatnetwork import HeatNetwork
from extensions.ibis import IBISBedrijventerreinen
from extensions.ielgas import IELGAS
from extensions.mapeditor_settings import MAPEDITOR_UI_SETTINGS, MapEditorSettings
from extensions.pico_rooftoppv_potential import PICORooftopPVPotential
from extensions.port_profile_viewer import PortProfileViewer
from extensions.profiles import Profiles
from extensions.session_manager import del_session, delete_sessions_on_disk, get_handler, get_session, \
get_session_for_esid, schedule_session_clean_up, set_handler, set_session, set_session_for_esid, valid_session
from extensions.settings_storage import SettingsStorage
from extensions.shapefile_converter import ShapefileConverter
from extensions.spatial_operations import SpatialOperations
from extensions.time_dimension import TimeDimension
from extensions.vesta import Vesta
from extensions.workflow import Workflow
from src.asset_draw_toolbar import AssetDrawToolbar
from src.assets_to_be_added import AssetsToBeAdded
from src.datalayer_api import DataLayerAPI
from src.edr_assets import EDRAssets
from src.esdl_helper import asset_state_to_ui, generate_profile_info, get_asset_and_coord_from_port_id, \
get_asset_from_port_id, get_connected_to_info, get_port_profile_info, get_tooltip_asset_attrs, \
update_carrier_conn_list
from src.esdl_services import ESDLServices
from src.essim_kpis import ESSIM_KPIs
from src.essim_validation import validate_ESSIM
from src.log import get_logger
from src.process_es_area_bld import get_building_information, process_energy_system
from src.user_logging import UserLogging
from src.version import __long_version__ as mapeditor_version
from src.view_modes import ViewModes
from src.wms_layers import WMSLayers
from utils.datetime_utils import parse_date
print('MapEditor version {}'.format(mapeditor_version))
logger = get_logger(__name__)
if settings.USE_GEVENT:
import gevent.monkey
gevent.monkey.patch_all()
logger.info("Using GEvent")
#TODO fix send_file in uwsgi
# debugging with pycharm:
#https://stackoverflow.com/questions/21257568/debugging-a-uwsgi-python-application-using-pycharm/25822477
user_actions_logging = UserLogging()
if settings.settings_storage_config["host"] is None or settings.settings_storage_config["host"] == "":
logger.error("Settings storage is not configured. Aborting...")
exit(1)
settings_storage = SettingsStorage(database_uri='mongodb://' + settings.settings_storage_config["host"] + ':' + settings.settings_storage_config["port"])
wms_layers = WMSLayers(settings_storage)
# handler to retrieve ESDL documentation
esdl_doc = EcoreDocumentation(esdlEcoreFile="esdl/esdl.ecore")
def is_running_in_uwsgi():
try:
import uwsgi
a = uwsgi.opt
logger.info("uWSGI startup options: {}".format(a))
return True
except Exception:
return False
# ---------------------------------------------------------------------------------------------------------------------
# Application definition, configuration and setup of simple file server
# ---------------------------------------------------------------------------------------------------------------------
app = Flask(__name__)
app.config['SECRET_KEY'] = b'\xc3g\x19\xbf\x8e\xa0\xe7\xc8\x9a/\xae%\x04g\xbe\x9f\xaex\xb5\x8c\x81f\xaf`' #os.urandom(24) #'secret!'
app.config['SESSION_COOKIE_NAME'] = 'ESDL-WebEditor-session'
app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'
# app.config['SESSION_COOKIE_SECURE'] = True
app.config['SESSION_PERMANENT'] = True
app.config['SESSION_TYPE'] = 'filesystem'
app.config['PERMANENT_SESSION_LIFETIME'] = 60*60*24 # 1 day in seconds
app.config['SESSION_FILE_DIR'] = '/tmp/flask_session'
app.config['EXECUTOR_PROPAGATE_EXCEPTIONS'] = True # make sure errors are logged for tasks run in threads
logger.info("Socket.IO Async mode: {}".format(settings.ASYNC_MODE))
logger.info('Running inside uWSGI: {}'.format(is_running_in_uwsgi()))
socketio = SocketIO(app, async_mode=settings.ASYNC_MODE, manage_session=False, path='/socket.io', logger=settings.FLASK_DEBUG)
# logging.getLogger('engineio').setLevel(logging.WARNING) # don't print all the messages
# remove existing sessions when restarting, existing sessions will give errors
# as associated ESDLs are not stored in the session and the OpenId connect info is wrong
delete_sessions_on_disk(app.config['SESSION_FILE_DIR'])
# fix sessions with socket.io. see: https://blog.miguelgrinberg.com/post/flask-socketio-and-the-user-session
Session(app)
executor = Executor(app)
#extensions
schedule_session_clean_up()
HeatNetwork(app, socketio)
IBISBedrijventerreinen(app, socketio)
ESDLBrowser(app, socketio, esdl_doc)
BAG(app, socketio)
BoundaryService(app, socketio, settings_storage)
esdl_api = ESDL_API(app, socketio)
ESDLCompare(app, socketio)
ESDLMerge(app, socketio, executor)
essim_kpis = ESSIM_KPIs(app, socketio)
essim = ESSIM(app, socketio, executor, essim_kpis, settings_storage)
ESSIMSensitivity(app, socketio, settings_storage, essim)
Vesta(app, socketio, settings_storage)
Workflow(app, socketio, settings_storage)
ESStatisticsService(app, socketio)
MapEditorSettings(app, socketio, settings_storage)
profiles = Profiles(app, socketio, executor, settings_storage)
ESDLDrive(app, socketio, executor)
ShapefileConverter(app, socketio, executor)
time_dimension = TimeDimension(app, socketio, executor, settings_storage)
IELGAS(app, socketio, settings_storage)
ETMLocal(app, socketio, settings_storage)
PortProfileViewer(app, socketio, settings_storage)
esdl_services = ESDLServices(app, socketio, settings_storage)
PICORooftopPVPotential(app, socketio)
SpatialOperations(app, socketio)
DataLayerAPI(app, socketio, esdl_doc)
ViewModes(app, socketio, settings_storage)
edr_assets = EDRAssets(app, socketio, settings_storage)
AssetsToBeAdded(app, socketio)
AssetDrawToolbar(app, socketio, settings_storage)
#TODO: check secret key with itsdangerous error and testing and debug here
app.config.update({
'TESTING': True,
'DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_REQUIRE_VERIFIED_EMAIL': False,
'OIDC_USER_INFO_ENABLED': True,
'OIDC_OPENID_REALM': 'esdl-mapeditor',
'OIDC_SCOPES': ['openid', 'email', 'profile', 'groups', 'microprofile-jwt'],
'OIDC_INTROSPECTION_AUTH_METHOD': 'client_secret_post',
'OIDC_CLIENT_SECRETS': settings.OIDC_CLIENT_SECRETS
})
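# For reference (a sketch only -- all values are placeholders and the exact URL layout
# depends on the Keycloak version in use): flask_oidc reads OIDC_CLIENT_SECRETS as a
# Google-style client-secrets document with a "web" section, roughly:
#
#   {
#     "web": {
#       "client_id": "<client-id>",
#       "client_secret": "<client-secret>",
#       "issuer": "https://<keycloak-host>/realms/<realm>",
#       "auth_uri": "https://<keycloak-host>/realms/<realm>/protocol/openid-connect/auth",
#       "token_uri": "https://<keycloak-host>/realms/<realm>/protocol/openid-connect/token",
#       "userinfo_uri": "https://<keycloak-host>/realms/<realm>/protocol/openid-connect/userinfo",
#       "redirect_uris": ["https://<mapeditor-host>/oidc_callback"]
#     }
#   }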
try:
oidc = OpenIDConnect(app)
except Exception as e:
logger.exception("Something went wrong when connecting to Keycloak")
import sys
sys.exit(1)
# TEMPORARY SOLUTION TO DISABLE BROWSER CACHING DURING TESTING
@app.after_request
def add_header(r: Response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
if r.content_type == 'image/png': # images are allowed to be cached.
return r
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.before_request
def before_request():
# store session id
session['client_id'] = request.cookies.get(app.config['SESSION_COOKIE_NAME']) # get cookie id
@app.route('/')
def index():
store_enabled = settings.esdl_store_config or settings.mondaine_hub_config
return render_template('index.html', store_enabled=store_enabled)
"""
# test for OpenID connect authentication against KeyCloak
@app.route('/test')
@oidc.require_login
def test_authentication():
if oidc.user_loggedin:
user_email = oidc.user_getfield('email')
user_groups = oidc.user_getfield('user_group')
logger.debug('user: {}, user groups: {}'.format(user_email, user_groups))
whole_token = oidc.get_access_token()
if whole_token:
jwt_tkn = jwt.decode(whole_token, key=settings.IDM_PUBLIC_KEY, algorithms='RS256', audience='account',
verify=True)
pprint(jwt_tkn)
return jwt_tkn
else:
return "Hello world!"
else:
return "Not logged in"
"""
@app.route('/editor')
@oidc.require_login
def editor():
#session['client_id'] = request.cookies.get(app.config['SESSION_COOKIE_NAME']) # get cookie id
#set_session('client_id', session['client_id'])
logger.info('client_id is set to %s' % session['client_id'])
if oidc.user_loggedin:
        if session['client_id'] is None:
warn('WARNING: No client_id in session!!')
whole_token = oidc.get_access_token()
logger.debug(f"whole_token: {whole_token}")
if whole_token:
try:
jwt_tkn = jwt.decode(whole_token, algorithms='RS256', verify=False)
pprint(jwt_tkn)
except Exception as e:
logger.exception(f"error in decoding token: {str(e)}")
# if role in access_token['resource_access'][client]['roles']:
user_email = oidc.user_getfield('email')
logger.info("************* USER LOGIN (" + user_email + ") at " + str(datetime.now()))
user_actions_logging.store_logging(user_email, "login", "", "", "", {})
userinfo = oidc.user_getinfo(['role'])
role = []
if 'role' in userinfo:
role = userinfo['role'].split(',')
# find roles in for the mapeditor client
mapeditor_role = []
client = oidc.client_secrets.get('client_id')
resource_access = oidc.user_getfield('resource_access')
if resource_access is not None and client in resource_access:
if 'roles' in resource_access[client]:
mapeditor_role = resource_access[client]['roles']
set_session('user-group', oidc.user_getfield('user_group'))
set_session('user-role', role)
set_session('user-email', user_email)
set_session('user-mapeditor-role', mapeditor_role)
set_session('jwt-token', whole_token)
user_fullname = oidc.user_getfield('name')
set_session('user-fullname', user_fullname)
esdl_store_enabled = not(settings.esdl_store_config["hostname"] is None or settings.esdl_store_config["hostname"] == "")
mondaine_hub_enabled = not(settings.mondaine_hub_config["hostname"] is None or settings.mondaine_hub_config["hostname"] == "")
store_enabled = esdl_store_enabled or mondaine_hub_enabled
esdl_drive_enabled = not(settings.esdl_drive_config["hostname"] is None or settings.esdl_drive_config["hostname"] == "")
edr_enabled = not(settings.edr_config["EDR_host"] is None or settings.edr_config["EDR_host"] == "")
essim_enabled = not(settings.essim_config["ESSIM_host"] is None or settings.essim_config["ESSIM_host"] == "")
boundary_service_enabled = not(settings.boundaries_config["host"] is None or settings.boundaries_config["host"] == "")
statistics_service_enabled = not(settings.statistics_settings_config["host"] is None or settings.statistics_settings_config["host"] == "")
bag_service_enabled = not(settings.bag_config["host"] is None or settings.bag_config["host"] == "")
ibis_service_enabled = not(settings.ibis_config["host"] is None or settings.ibis_config["host"] == "")
logger.info("store:{} drive:{} edr:{} bound:{} stat:{} bag:{} ibis:{}".format(store_enabled, esdl_drive_enabled,
edr_enabled, boundary_service_enabled, statistics_service_enabled,bag_service_enabled, ibis_service_enabled))
return render_template('editor.html',async_mode=socketio.async_mode,
role=role,
store_enabled=store_enabled,
esdl_drive_enabled=esdl_drive_enabled,
edr_enabled=edr_enabled,
essim_enabled=essim_enabled,
boundary_service_enabled=boundary_service_enabled,
statistics_service_enabled=statistics_service_enabled,
bag_service_enabled=bag_service_enabled,
ibis_service_enabled=ibis_service_enabled,
debug=settings.FLASK_DEBUG,
version=mapeditor_version
)
else:
return render_template('index.html')
# to enable working offline without IDM:
# - comment the @oidc.require_login above this method
# - comment the line above: return render_template('index.html')
# - uncomment the following line:
# return render_template('editor.html', async_mode=socketio.async_mode, role=role)
"""
Checks the OpenID connect session status
And refreshes if necessary?
"""
@app.route('/auth_status')
#@oidc.require_login
def auth_status():
from flask import g
#logger.debug("Global token: {}".format(g.oidc_id_token))
status: Response = oidc.authenticate_or_redirect()
if status is None:
if oidc.user_loggedin:
curr_token = get_session('jwt-token')
if oidc.get_access_token() is not None:
if curr_token is not None and curr_token == oidc.get_access_token():
return {'valid': True, 'reason': "Unchanged"}
else:
logger.info("Refreshed access token for {}".format(oidc.user_getfield('email')))
set_session('jwt-token', oidc.get_access_token())
return {'valid': True, 'reason': "Refreshed"}
else:
# This is the case when the app was restarted: the browser still has a valid cookie and
# appears to be authorized, but the token has not been refreshed and is no longer accessible via oidc.
#if g.oidc_id_token is not None:
# update oidc with session info
#oidc.credentials_store[g.oidc_id_token['sub']] = g.oidc_id_token
#logger.debug("Setting cookie access token ", oidc.get_access_token())
#set_session('jwt-token', oidc.get_access_token())
#return {'valid': True, 'reason': "Updated token"}
g.oidc_id_token = None
oidc.logout()
status: Response = oidc.redirect_to_auth_server('/editor')
uri = status.headers["Location"]
return {'valid': False, 'reason': "Token expired or not available", 'redirect_uri': uri}
else:
oidc.logout()
return {'valid': False, 'reason': "Not logged in"}
else:
status: Response = oidc.redirect_to_auth_server('/editor') # get redirect for /editor, not /auth_status
uri = status.headers["Location"]
return {'valid': False, 'reason': "Authentication required", 'redirect_uri': uri}
# return status # returns a redirect, but that is consumed by the browser because of a 302 status
@app.route('/logout')
def logout():
"""Performs a local logout by removing the session cookie and also logs out at the IDM."""
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "logout", "", "", "", {})
oidc.logout()
#This should be done automatically! see issue https://github.com/puiterwijk/flask-oidc/issues/88
return redirect(oidc.client_secrets.get('issuer') + '/protocol/openid-connect/logout?redirect_uri=' + request.host_url)
# Can't find out why send_file does not work in uWSGI with threading.
# For now we manually send the ESDL as a string, which is (probably) not efficient.
# This still works with a 1.6 MB file, but it is unclear how much further this scales.
@app.route('/esdl')
def download_esdl():
"""Sends the current ESDL file to the browser as an attachment"""
esh = get_handler()
active_es_id = get_session('active_es_id')
try:
#stream = esh.to_bytesio()
my_es = esh.get_energy_system(es_id=active_es_id)
esh.update_version(es_id=active_es_id)
if my_es.esdlVersion is None or my_es.esdlVersion == '':
my_es.esdlVersion = esdl_doc.get_esdl_version()
try:
name = my_es.name
except Exception:
name = my_es.id
if name is None or name == '':
name = "UntitledEnergySystem"
name = '{}.esdl'.format(name)
logger.info('Sending file %s' % name)
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "download esdl", name, "", "", {})
content = esh.to_string(es_id=active_es_id)
#wrapped_io = FileWrapper(stream)
#logger.debug(content)
headers = dict()
#headers['Content-Type'] = 'application/esdl+xml'
headers['Content-Disposition'] = 'attachment; filename="{}"'.format(name)
headers['Content-Length'] = len(content)
return Response(content, mimetype='application/esdl+xml', direct_passthrough=True, headers=headers)
#return send_file(stream, as_attachment=True, mimetype='application/esdl+xml', attachment_filename=name)
except Exception as e:
import traceback
traceback.print_exc()
return "Error sending ESDL file, due to {}".format(e)
@app.route('/<path:path>')
def serve_static(path):
# logger.debug('in serve_static(): '+ path)
return send_from_directory('static', path)
# @app.route('/edr_assets')
# def get_edr_assets():
# edr_url = settings.edr_config['EDR_host']+'/store/tagged?tag=asset'
# # logger.debug('accessing URL: '+edr_url)
#
# try:
# r = requests.get(edr_url)
# if r.status_code == 200:
# result = json.loads(r.text)
# asset_list = []
# for a in result:
# asset = {'id': a["id"], 'title': a["title"], 'description': a["description"]}
# asset_list.append(asset)
#
# return (jsonify({'asset_list': asset_list})), 200
# else:
# logger.error('code: ', r.status_code)
# send_alert('Error in getting the EDR assets')
# abort(500, 'Error in getting the EDR assets')
# except Exception as e:
# logger.error('Exception: ')
# logger.error(e)
# send_alert('Error accessing EDR API')
# abort(500, 'Error accessing EDR API')
# ---------------------------------------------------------------------------------------------------------------------
# File I/O and ESDL Store API calls
# ---------------------------------------------------------------------------------------------------------------------
if settings.esdl_store_config is not None and settings.esdl_store_config != "":
default_store_url = settings.esdl_store_config['hostname'] + '/store/'
else:
default_store_url = None
if settings.mondaine_hub_config is not None and settings.mondaine_hub_config != "":
mondaine_hub_url = settings.mondaine_hub_config['hostname'] + '/store/'
else:
mondaine_hub_url = None
def create_ESDL_store_item(id, esh, title, description, email):
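"""Saves the current energy system as a new item in the ESDL store (or the Mondaine hub for users with the 'mondaine' role)."""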
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
try:
payload = {'id': id, 'title': title, 'description': description, 'email':email, 'esdl': esdlstr}
requests.post(store_url, data=payload)
except Exception as e:
send_alert('Error accessing ESDL store:' + str(e))
def load_ESDL_EnergySystem(store_id):
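"""Loads an ESDL energy system from the store into the handler; returns the EnergySystemHandler, or None when the item cannot be retrieved or parsed."""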
store_item = load_store_item(store_id)
if store_item:
esdlstr = store_item['esdl']
del store_item['esdl']
set_session('store_item_metadata', store_item)
emit('store_item_metadata', store_item)
try:
esh = get_handler()
es, parse_info = esh.load_from_string(esdl_string=esdlstr, name=store_item['title'])
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(store_item['title'], info))
return esh
except Exception as e:
send_alert('Error interpreting ESDL file from store: ' + str(e))
return None
else:
return None
def import_ESDL_EnergySystem(store_id):
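"""Imports an ESDL energy system from the store into the already loaded handler; returns the imported EnergySystem, or None on failure."""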
store_item = load_store_item(store_id)
if store_item:
esdlstr = store_item['esdl']
del store_item['esdl']
set_session('store_item_metadata', store_item)
emit('store_item_metadata', store_item)
try:
esh = get_handler()
imported_es, parse_info = esh.add_from_string(esdl_string=esdlstr, name=store_item['title'])
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(store_item['title'], info))
return imported_es
except Exception as e:
send_alert('Error interpreting ESDL file from store: ' + str(e))
return None
else:
return None
def load_store_item(store_id):
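"""Fetches a single item (metadata plus ESDL as XML string) from the ESDL store or Mondaine hub, depending on the user's role; returns None on any error."""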
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
url = store_url + store_id + '?format=xml'
try:
r = requests.get(url)
except Exception as e:
send_alert('Error accessing ESDL store:' + str(e))
return None
if r.status_code == 200:
result = json.loads(r.text)
if len(result) > 0:
return result
else:
return None
else:
logger.error('Accessing store returned status: ' + str(r.status_code))
send_alert('Error accessing ESDL store: ' + str(r))
return None
else:
return None
def update_store_item(store_id, title, descr, email, tags, esh):
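"""Updates an existing store item with new metadata and the current ESDL contents via an HTTP PUT."""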
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
payload = {'id': store_id, 'title': title, 'description': descr, 'email': email, 'tags': tags, 'esdl': esdlstr}
try:
requests.put(store_url + store_id, data=payload)
except Exception as e:
send_alert('Error saving ESDL file to store: ' + str(e))
def create_new_store_item(store_id, title, descr, email, tags, esh):
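"""Creates a new store item with the given metadata and the current ESDL contents via an HTTP POST; alerts the user when the store does not reply with 201 Created."""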
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
payload = {'id': store_id, 'title': title, 'description': descr, 'email': email, 'tags': tags, 'esdl': esdlstr}
try:
r = requests.post(store_url, data=payload)
except Exception as e:
send_alert('Error saving ESDL file to store: ' + str(e))
return  # r is not defined when the request itself failed
if r.status_code != 201:
send_alert('Error saving ESDL file to store. Error code: ' + str(r.status_code))
# ---------------------------------------------------------------------------------------------------------------------
# parse the ESDL config file
# ---------------------------------------------------------------------------------------------------------------------
def parse_esdl_config():
esdlc = esdl_config.esdl_config
logger.info('Configuration found: {}'.format(esdlc))
# ---------------------------------------------------------------------------------------------------------------------
# Send alert to client UI
# ---------------------------------------------------------------------------------------------------------------------
def send_alert(message):
logger.warning(message)
emit('alert', message, namespace='/esdl')
# FIXME: pyecore
def _set_carrier_for_connected_transport_assets(asset_id, carrier_id, processed_assets):
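"""Recursively assigns the carrier to the ports of this transport asset and of all connected transport assets
(recursion stops at HeatExchange, Transformer and non-transport assets) and emits 'update_asset' messages so the
UI can redraw the affected ports."""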
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
processed_assets.append(asset_id)
port_list = []
for p in asset.port:
p.carrier = esh.get_by_id(active_es_id, carrier_id) #FIXME pyecore
conn_to = p.connectedTo
if conn_to:
for conn_port in conn_to:
conn_asset = get_asset_from_port_id(esh, active_es_id, conn_port.id)
if isinstance(conn_asset, esdl.Transport) and not isinstance(conn_asset, esdl.HeatExchange) \
and not isinstance(conn_asset, esdl.Transformer):
if conn_asset.id not in processed_assets:
_set_carrier_for_connected_transport_assets(conn_asset.id, carrier_id, processed_assets)
else:
conn_asset_port_list = []
for conn_asset_port in conn_asset.port:
if conn_asset_port.id == conn_port.id:
conn_asset_port.carrier = p.carrier
for conn_to_same_port in conn_asset_port.connectedTo:
if conn_to_same_port.id != p.id:  # don't traverse back to the original port
conn_to_same_port_asset = get_asset_from_port_id(esh, active_es_id, conn_to_same_port.id)
if conn_to_same_port_asset.id not in processed_assets:
_set_carrier_for_connected_transport_assets(conn_to_same_port_asset.id, carrier_id, processed_assets)
conn_asset_port_list.append({'name': conn_asset_port.name, 'id': conn_asset_port.id,
'type': type(conn_asset_port).__name__, 'conn_to': [pt.id for pt in conn_asset_port.connectedTo],
'carrier': conn_asset_port.carrier.id if conn_asset_port.carrier else None})
# also update the ports of the 'leaf' asset (recursion stops here)
emit('update_asset', {'asset_id': conn_asset.id, 'ports': conn_asset_port_list})
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo], 'carrier': p.carrier.id if p.carrier else None})
# update the asset ports in the gui, if the carrier has changed.
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
def set_carrier_for_connected_transport_assets(asset_id, carrier_id):
processed_assets = [] # List of asset_id's that are processed
_set_carrier_for_connected_transport_assets(asset_id, carrier_id, processed_assets)
# logger.debug(processed_assets)
# ---------------------------------------------------------------------------------------------------------------------
# Build up initial information about energysystem to send to browser
# ---------------------------------------------------------------------------------------------------------------------
def generate_point_in_area(boundary):
return
def update_building_asset_geometries(building, avail_locations):
for basset in building.asset:
if isinstance(basset, esdl.EnergyAsset):
geom = basset.geometry
if not geom:
location = avail_locations.pop(0)
geom = esdl.Point(lon=location[1], lat=location[0])
basset.geometry = geom
def update_area_asset_geometries(area, avail_locations):
# process subareas
for ar in area.area:
update_area_asset_geometries(ar, avail_locations)
# process assets in area
for asset in area.asset:
if isinstance(asset, esdl.AbstractBuilding):
update_building_asset_geometries(asset, avail_locations)
if isinstance(asset, esdl.EnergyAsset):
geom = asset.geometry
if not geom:
location = avail_locations.pop(0)
geom = esdl.Point(lon=location[1], lat=location[0])
asset.geometry = geom
def count_building_assets_and_potentials(building):
# TODO: Fix: BuildingUnits are incorrectly taken into account in this count
# TODO: add potentials
num = len(building.asset)
for basset in building.asset:
if isinstance(basset, esdl.AbstractBuilding):
num += count_building_assets_and_potentials(basset)
return num
def count_assets_and_potentials(area):
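"""Recursively counts the assets and potentials in an area and its sub-areas, including assets inside (nested) buildings."""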
num = len(area.asset)
num += len(area.potential)
for ar_asset in area.asset:
if isinstance(ar_asset, esdl.AbstractBuilding):
num += count_building_assets_and_potentials(ar_asset)
for ar in area.area:
num += count_assets_and_potentials(ar)
return num
def calculate_triangle_center(triangle):
sumx = triangle[0][0] + triangle[1][0] + triangle[2][0]
sumy = triangle[0][1] + triangle[1][1] + triangle[2][1]
center_coord = [sumx / 3, sumy / 3]
return center_coord
def get_control_strategy_info(asset):
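"""Returns a dict describing the asset's control strategy, with strategy-specific fields (ports, profile, costs, ...); returns an empty dict when no strategy is attached."""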
control_strategy = asset.controlStrategy
if control_strategy:
cs_info = {
'id': control_strategy.id,
'name': control_strategy.name,
'type': type(control_strategy).__name__
}
if isinstance(control_strategy, esdl.DrivenByDemand):
if control_strategy.outPort:
cs_info['out_port_id'] = control_strategy.outPort.id
if isinstance(control_strategy, esdl.DrivenBySupply):
if control_strategy.inPort:
cs_info['in_port_id'] = control_strategy.inPort.id
if isinstance(control_strategy, esdl.DrivenByProfile):
if control_strategy.port:
cs_info['port_id'] = control_strategy.port.id
if control_strategy.profile:
cs_info['profile_id'] = control_strategy.profile.id
if isinstance(control_strategy, esdl.StorageStrategy):
mcc, mdc = get_storage_marginal_costs(asset.id)
cs_info['marginal_charge_costs'] = mcc
cs_info['marginal_discharge_costs'] = mdc
if isinstance(control_strategy, esdl.CurtailmentStrategy):
cs_info['max_power'] = control_strategy.maxPower
if isinstance(control_strategy, esdl.PIDController):
cs_info['kp'] = control_strategy.Kp
cs_info['ki'] = control_strategy.Ki
cs_info['kd'] = control_strategy.Kd
return cs_info
else:
return {}
def add_bld_to_area_bld_list(bld_to_add, to_area_or_bld_id, ab_list):
# area_bld_list.append(['Building', building.id, building.name, level])
for idx, rcv_ab in enumerate(ab_list):
if rcv_ab[1] == to_area_or_bld_id:
ab_list.insert(idx+1, ['Building', bld_to_add.id, bld_to_add.name, rcv_ab[3] + 1])
def add_area_to_area_bld_list(area_to_add, to_area_id, ab_list):
# area_bld_list.append(['Area', area.id, area.name, level])
for idx, rcv_ab in enumerate(ab_list):
if rcv_ab[1] == to_area_id:
ab_list.insert(idx+1, ['Area', area_to_add.id, area_to_add.name, rcv_ab[3] + 1])
def remove_ab_from_area_bld_list(ab_id, ab_list):
for idx, ab in enumerate(ab_list):
if ab[1] == ab_id:
ab_list.pop(idx)
return
# TODO: Not used now, should we keep the conn_list updated? --> Yes, now we do! For redrawing when selecting carriers
# 13-1-2020: Commented out: energycarrier info for port not added yet because function is not used at the moment.
#def add_connection_to_list(conn_list, from_port_id, from_asset_id, from_asset_coord, to_port_id, to_asset_id, to_asset_coord):
# conn_list.append(
# {'from-port-id': from_port_id, 'from-asset-id': from_asset_id, 'from-asset-coord': from_asset_coord,
# 'to-port-id': to_port_id, 'to-asset-id': to_asset_id, 'to-asset-coord': to_asset_coord})
def update_asset_connection_locations(ass_id, lat, lon):
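"""Updates the cached connection coordinates of a (point) asset that was moved and redraws all connections in the UI."""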
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
for c in conn_list:
if c['from-asset-id'] == ass_id:
c['from-asset-coord'] = (lat, lon)
if c['to-asset-id'] == ass_id:
c['to-asset-coord'] = (lat, lon)
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
def update_transport_connection_locations(ass_id, asset, coords):
active_es_id = get_session('active_es_id')
esh = get_handler()
conn_list = get_session_for_esid(active_es_id, 'conn_list')
# logger.debug('Updating locations')
for c in conn_list:
if c['from-asset-id'] == ass_id:
port_id = c['from-port-id']
port_ass_map = get_asset_and_coord_from_port_id(esh, active_es_id, port_id)
c['from-asset-coord'] = port_ass_map['coord']
if c['to-asset-id'] == ass_id:
port_id = c['to-port-id']
port_ass_map = get_asset_and_coord_from_port_id(esh, active_es_id, port_id)
c['to-asset-coord'] = port_ass_map['coord']
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
def update_polygon_asset_connection_locations(ass_id, coords):
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
for c in conn_list:
if c['from-asset-id'] == ass_id:
c['from-asset-coord'] = coords
if c['to-asset-id'] == ass_id:
c['to-asset-coord'] = coords
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
set_session_for_esid(active_es_id, 'conn_list', conn_list)
# ---------------------------------------------------------------------------------------------------------------------
# Create connections between assets
# ---------------------------------------------------------------------------------------------------------------------
def connect_ports(port1, port2):
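"""Connects two ports by appending port2 to port1.connectedTo. Note: connectedTo is a bidirectional (eOpposite)
reference in the ESDL metamodel, so pyEcore is expected to update port2.connectedTo automatically as well."""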
port1.connectedTo.append(port2)
def split_conductor(conductor, location, mode, conductor_container):
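"""Splits a conductor with a Line geometry at the clicked location into two new conductors that replace the
original in its containing area or building. With mode == 'connect' the two new ports are connected directly;
with mode == 'add_joint' a Joint asset is inserted between them. The UI is updated with the new assets and
connections."""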
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
esh = get_handler()
geometry = conductor.geometry
conductor_type = type(conductor).__name__
conductor_id = conductor.id
middle_point = esdl.Point(lat=location['lat'], lon=location['lng']) #no elevation?
if isinstance(geometry, esdl.Line):
# create two separate line segments
line1 = esdl.Line()
line2 = esdl.Line()
#find piece of line where user clicked
points = geometry.point
begin_point = points[0]
first_point = points[0] # make an additional copy
# Ewoud: this code is not so nice, since it manipulates the original geometry.point with points.pop(0) later on;
# this should be fixed, but not now (no time).
# pyEcore: somehow reusing points[0] does something strange in the serialization to XML:
# instead of <point xsi:type="esdl:Point"> you get <esdl:Point lat=...>, which is wrong.
# Duplicating this point manually fixes this, probably because there is a reference to this point
# elsewhere which gets serialized as an <esdl:Point>.
# Strictly speaking we should duplicate all Points in this code.
line1.point.append(esdl.Point(lat=begin_point.lat, lon=begin_point.lon, elevation=begin_point.elevation))
points.pop(0)
min_dist = 1e99
segm_ctr = 0
min_dist_segm = 0
for point in points:
p1 = {'x': begin_point.lat, 'y': begin_point.lon}
p2 = {'x': point.lat, 'y': point.lon}
p = {'x': location['lat'], 'y': location['lng']}
dist = ESDLGeometry.distance_point_to_line(p, p1, p2)
if dist < min_dist:
min_dist = dist
min_dist_segm = segm_ctr
begin_point = point
segm_ctr += 1
# copy appropriate points in original conductor to either line1 or line2
points = geometry.point
segm_ctr = 0
logger.debug('segment min = {}'.format(min_dist_segm))
for point in list(points):
if segm_ctr == min_dist_segm:
new_point = esdl.Point(lon=middle_point.lon, lat=middle_point.lat, elevation=middle_point.elevation)
line1.point.append(new_point)
line2.point.append(new_point.clone())
if segm_ctr < min_dist_segm:
line1.point.append(point)
else:
line2.point.append(point)
segm_ctr += 1
# find old ports and connections
ports = conductor.port
if len(ports) != 2:
send_alert('UNSUPPORTED: Conductor doesn\'t have two ports!')
return
port1 = ports[0] # reuse old conductor's ports; TODO: check what happens after deleting conductor
port2 = ports[1]
new_cond1_id = str(uuid.uuid4())
new_cond2_id = str(uuid.uuid4())
new_port1_id = str(uuid.uuid4())
new_port2_id = str(uuid.uuid4())
# create two conductors of the same type as the conductor being split, by duplicating the original
# (this also copies over attributes such as the pipe material)
new_cond1 = conductor.deepcopy()
new_cond2 = conductor.deepcopy()
new_cond1.id = new_cond1_id
new_cond1.port.clear() # remove existing port, as we add previous used ports later
new_cond2.id = new_cond2_id
new_cond2.port.clear()
esh.add_object_to_dict(active_es_id, new_cond1)
esh.add_object_to_dict(active_es_id, new_cond2)
if type(port1).__name__ == "InPort":
new_port2 = esdl.OutPort(id=new_port2_id, name='Out')
else:
new_port2 = esdl.InPort(id=new_port2_id, name='In')
new_cond1.port.append(port1)
new_cond1.port.append(new_port2)
if type(port2).__name__ == "InPort":
new_port1 = esdl.OutPort(id=new_port1_id, name='Out')
else:
new_port1 = esdl.InPort(id=new_port1_id, name='In')
if mode == 'connect':
new_port1.connectedTo.append(new_port2)
new_port2.connectedTo.append(new_port1)
new_cond2.port.append(new_port1)
new_cond2.port.append(port2)
esh.add_object_to_dict(active_es_id, new_port1)
esh.add_object_to_dict(active_es_id, new_port2)
# calculate line lengths
start = line1.point[0]
length = 0
for i in range(1, len(line1.point)):
length += ESDLGeometry.distance((start.lat, start.lon), (line1.point[i].lat, line1.point[i].lon)) * 1000
start = line1.point[i]
new_cond1.length = round(length, 2)
start = line2.point[0]
length = 0
for i in range(1, len(line2.point)):
length += ESDLGeometry.distance((start.lat, start.lon), (line2.point[i].lat, line2.point[i].lon)) * 1000
start = line2.point[i]
new_cond2.length = round(length, 2)
logger.debug('split-conductor: line1 length={}, line2 length={}'.format(new_cond1.length, new_cond2.length))
# assign line geometry to the correct conductor
new_cond1.geometry = line1
new_cond2.geometry = line2
# remove conductor from container (area or building) and add new two conductors
assets = conductor_container.asset
assets.remove(conductor)
conductor_container.asset.append(new_cond1)
conductor_container.asset.append(new_cond2)
# create list of ESDL assets to be added to UI
esdl_assets_to_be_added = []
coords1 = []
for point in line1.point:
coords1.append([point.lat, point.lon])
port_list = []
carrier = None
if port1.carrier: carrier = port1.carrier
if port2.carrier: carrier = port2.carrier
carrier_id = carrier.id if carrier is not None else None
for p in new_cond1.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [p.id for p in p.connectedTo], 'carrier': carrier_id})
state = asset_state_to_ui(new_cond1)
tooltip_asset_attrs = get_tooltip_asset_attrs(new_cond1, 'line')
esdl_assets_to_be_added.append(['line', 'asset', new_cond1.name, new_cond1.id, type(new_cond1).__name__,
coords1, tooltip_asset_attrs, state, port_list])
coords2 = []
for point in line2.point:
coords2.append([point.lat, point.lon])
port_list = []
for p in new_cond2.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [p.id for p in p.connectedTo], 'carrier': carrier_id})
state = asset_state_to_ui(new_cond2)
tooltip_asset_attrs = get_tooltip_asset_attrs(new_cond2, 'line')
esdl_assets_to_be_added.append(['line', 'asset', new_cond2.name, new_cond2.id, type(new_cond2).__name__,
coords2, tooltip_asset_attrs, state, port_list])
# update asset id's of conductor with new_cond1 and new_cond2 in conn_list
for c in conn_list:
if c['from-asset-id'] == conductor_id and c['from-port-id'] == port1.id:
c['from-asset-id'] = new_cond1_id
if c['from-asset-id'] == conductor_id and c['from-port-id'] == port2.id:
c['from-asset-id'] = new_cond2_id
if c['to-asset-id'] == conductor_id and c['to-port-id'] == port1.id:
c['to-asset-id'] = new_cond1_id
if c['to-asset-id'] == conductor_id and c['to-port-id'] == port2.id:
c['to-asset-id'] = new_cond2_id
# create list of connections to be added to UI
if mode == 'connect':
conn_list.append({'from-port-id': new_port2_id, 'from-port-carrier': carrier_id,
'from-asset-id': new_cond1_id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port1_id, 'to-port-carrier': carrier_id, 'to-asset-id': new_cond2_id,
'to-asset-coord': (middle_point.lat, middle_point.lon)})
if mode == 'add_joint':
joint_id = str(uuid.uuid4())
joint = esdl.Joint(id=joint_id, name='Joint_'+joint_id[:4])
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
outp = esdl.OutPort(id=str(uuid.uuid4()), name='Out')
if carrier:
inp.carrier = carrier
outp.carrier = carrier
if type(new_port2).__name__ == "OutPort":
inp.connectedTo.append(new_port2)
new_port2_conn_to_id = inp.id
else:
outp.connectedTo.append(new_port2)
new_port2_conn_to_id = outp.id
if type(new_port1).__name__ == "InPort":
outp.connectedTo.append(new_port1)
new_port1_conn_to_id = outp.id
else:
inp.connectedTo.append(new_port1)
new_port1_conn_to_id = inp.id
joint.port.append(inp)
joint.port.append(outp)
joint.geometry = middle_point
conductor_container.asset.append(joint)
esh.add_object_to_dict(active_es_id, joint)
esh.add_object_to_dict(active_es_id, inp)
esh.add_object_to_dict(active_es_id, outp)
port_list = []
for p in joint.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [p.id for p in p.connectedTo], 'carrier': carrier_id})
capability_type = ESDLAsset.get_asset_capability_type(joint)
state = asset_state_to_ui(joint)
tooltip_asset_attrs = get_tooltip_asset_attrs(joint, 'marker')
esdl_assets_to_be_added.append(['point', 'asset', joint.name, joint.id, type(joint).__name__,
[middle_point.lat, middle_point.lon], tooltip_asset_attrs, state, port_list,
capability_type])
conn_list.append({'from-port-id': new_port2_id, 'from-port-carrier': carrier_id,
'from-asset-id': new_cond1_id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port2_conn_to_id, 'to-port-carrier': carrier_id,
'to-asset-id': joint.id, 'to-asset-coord': (middle_point.lat, middle_point.lon)})
conn_list.append({'from-port-id': new_port1_conn_to_id, 'from-port-carrier': carrier_id,
'from-asset-id': joint.id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port1_id, 'to-port-carrier': carrier_id,
'to-asset-id': new_cond2_id, 'to-asset-coord': (middle_point.lat, middle_point.lon)})
# now send new objects to UI
emit('add_esdl_objects', {'es_id': active_es_id, 'asset_pot_list': esdl_assets_to_be_added, 'zoom': False})
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
else:
send_alert('UNSUPPORTED: Conductor is not of type esdl.Line!')
# ---------------------------------------------------------------------------------------------------------------------
# Update ESDL coordinates on movement of assets in browser
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('update-coord', namespace='/esdl')
def update_coordinates(message):
# This function can also be called when the geometry of an asset is of type esdl.Polygon, because
# the asset on the leaflet map is both represented as a Polygon and a Point (to connect, to attach menus)
active_es_id = get_session('active_es_id')
esh = get_handler()
obj_id = message['id']
coords = message['coordinates']
object = esh.get_by_id(active_es_id, obj_id)
# object can be an EnergyAsset, Building, Potential or Note
if object:
if isinstance(object, esdl.Note):
geom = object.mapLocation
else:
geom = object.geometry
if isinstance(geom, esdl.Point):
point = esdl.Point(lon=float(coords['lng']), lat=float(coords['lat']))
if isinstance(object, esdl.Note):
object.mapLocation = point
else:
object.geometry = point
# elif isinstance(geom, esdl.Polygon):
# Do nothing in case of a polygon
# only update the connection locations and mappings based on the center of the polygon
# that is given as a parameter.
# update coordinates in asset_list
asset_list = get_session_for_esid(active_es_id, 'asset_list')
for a in asset_list:
if a[3] == obj_id:
a[5] = [coords['lat'], coords['lng']]
break # ready searching
if isinstance(object, (esdl.EnergyAsset, esdl.AbstractBuilding)):
# Update locations of connections on moving assets
update_asset_connection_locations(obj_id, coords['lat'], coords['lng'])
# TODO: Check if this is still required
if message['asspot'] == 'building':
send_alert("Assets in building with locations are not updated yet")
@socketio.on('update-line-coord', namespace='/esdl')
def update_line_coordinates(message):
# logger.debug ('received polyline: ' + str(message['id']) + ':' + str(message['polyline']))
ass_id = message['id']
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, ass_id)
if asset:
ports = asset.port
polyline_data = message['polyline']
# logger.debug(polyline_data)
# logger.debug(type(polyline_data))
polyline_length = float(message['length'])
asset.length = polyline_length
line = esdl.Line()
for i in range(0, len(polyline_data)):
coord = polyline_data[i]
point = esdl.Point(lon=coord['lng'], lat=coord['lat'])
line.point.append(point)
asset.geometry = line
# update coordinates in asset_list
asset_list = get_session_for_esid(active_es_id, 'asset_list')
for a in asset_list:
if a[3] == ass_id:
a[5] = [(coord['lat'], coord['lng']) for coord in polyline_data]
break # ready searching
update_transport_connection_locations(ass_id, asset, polyline_data)
@socketio.on('update-polygon-coord', namespace='/esdl')
def update_polygon_coordinates(message):
# logger.debug ('received polygon: ' + str(message['id']) + ':' + str(message['polygon']))
ass_id = message['id']
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, ass_id)
if asset:
polygon_data = message['polygon']
# logger.debug(polygon_data)
# logger.debug(type(polygon_data))
polygon_area = int(message['polygon_area'])
asset.surfaceArea = polygon_area
polygon_data = ESDLGeometry.remove_duplicates_in_polygon(polygon_data)
polygon_data = ESDLGeometry.remove_latlng_annotation_in_array_of_arrays(polygon_data)
polygon_data = ESDLGeometry.exchange_polygon_coordinates(polygon_data) # --> [lon, lat]
polygon = ESDLGeometry.convert_pcoordinates_into_polygon(polygon_data) # expects [lon, lat]
asset.geometry = polygon
polygon_center = ESDLGeometry.calculate_polygon_center(polygon)
update_polygon_asset_connection_locations(ass_id, polygon_center)
# ---------------------------------------------------------------------------------------------------------------------
# Control Strategies
# ---------------------------------------------------------------------------------------------------------------------
def get_control_strategies(es):
strategies = []
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.ControlStrategy):
strategies.append(service)
return strategies
def get_control_strategy_for_asset(asset_id):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
return asset.controlStrategy
# strategies = get_control_strategies(es)
# for strategy in strategies:
# cs_a = strategy.energyAsset
# if cs_a.id == asset_id:
# return strategy
# return None
def add_control_strategy_for_asset(asset_id, cs):
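"""Adds the control strategy to the EnergySystem services, replacing any existing control strategy for the same asset."""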
active_es_id = get_session('active_es_id')
esh = get_handler()
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if not services:
services = esdl.Services()
es.services = services
services_list = services.service
for service in set(services_list):
if isinstance(service, esdl.ControlStrategy):
if service.energyAsset.id == asset_id:
services_list.remove(service)
services.service.append(cs)
def add_drivenby_control_strategy_for_asset(asset_id, control_strategy, port_id):
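"""Instantiates the named control strategy for the asset; for DrivenByDemand the given port becomes the outPort, for DrivenBySupply the inPort."""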
active_es_id = get_session('active_es_id')
esh = get_handler()
module = importlib.import_module('esdl.esdl')
class_ = getattr(module, control_strategy)
cs = class_()
asset = esh.get_by_id(active_es_id, asset_id)
asset_name = asset.name
if not asset_name:
asset_name = 'unknown'
cs.id = str(uuid.uuid4())
cs.name = control_strategy + ' for ' + asset_name
cs.energyAsset = asset
if control_strategy == 'DrivenByDemand':
cs.outPort = next((p for p in esdl.Port.allInstances() if p.id == port_id), None)
if control_strategy == 'DrivenBySupply':
cs.inPort = next((p for p in esdl.Port.allInstances() if p.id == port_id), None)
add_control_strategy_for_asset(asset_id, cs)
def add_storage_control_strategy_for_asset(asset_id, mcc, mdc):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
if not asset.name:
asset.name = 'Unknown Asset'
cs = esdl.StorageStrategy()
cs.id = str(uuid.uuid4())
cs.name = 'StorageStrategy for ' + asset.name
cs.energyAsset = asset
mcc_sv = esdl.SingleValue(id=str(uuid.uuid4()), name='marginalChargeCosts for ' + asset.name, value=str2float(mcc))
cs.marginalChargeCosts = mcc_sv
mdc_sv = esdl.SingleValue(id=str(uuid.uuid4()), name='marginalDischargeCosts for ' + asset.name, value=str2float(mdc))
cs.marginalDischargeCosts = mdc_sv
add_control_strategy_for_asset(asset_id, cs)
def add_curtailment_control_strategy_for_asset(asset_id, max_power):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
if not asset.name:
asset.name = 'Unknown Asset'
cs = esdl.CurtailmentStrategy()
cs.id = str(uuid.uuid4())
cs.name = 'CurtailmentStrategy for ' + asset.name
cs.energyAsset = asset
cs.maxPower = str2float(max_power)
add_control_strategy_for_asset(asset_id, cs)
def get_storage_marginal_costs(asset_id):
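"""Returns the (marginal charge costs, marginal discharge costs) for a storage asset, or (0, 0) when no StorageStrategy is defined."""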
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.StorageStrategy):
if service.energyAsset == asset:
mcc_sv = service.marginalChargeCosts
mdc_sv = service.marginalDischargeCosts
if mcc_sv:
mcc = mcc_sv.value
else:
mcc = 0
if mdc_sv:
mdc = mdc_sv.value
else:
mdc = 0
return mcc, mdc
return 0, 0
def get_curtailment_max_power(asset_id):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.CurtailmentStrategy):
if service.energyAsset == asset:
return service.maxPower
return 0
def remove_control_strategy_for_asset(asset_id):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
cs = asset.controlStrategy
if cs:
cs.delete()
#services_collection = es.services
#if services_collection:
# services = services_collection.service
# for service in services:
# if isinstance(service, esdl.ControlStrategy):
# if service.energyAsset == asset_id:
# services.remove(service)
# ---------------------------------------------------------------------------------------------------------------------
# Marginal Costs
# ---------------------------------------------------------------------------------------------------------------------
def set_marginal_costs_for_asset(asset_id, marginal_costs):
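"""Stores the marginal costs as a SingleValue in the asset's CostInformation, creating the containers when necessary."""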
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
asset_name = asset.name
if not asset_name:
asset_name = asset.id
ci = asset.costInformation
if not ci:
ci = esdl.CostInformation()
asset.costInformation = ci
mc = ci.marginalCosts
if not mc:
mc = esdl.SingleValue()
mc.id = str(uuid.uuid4())
mc.name = asset_name + '-MarginalCosts'
ci.marginalCosts = mc
mc.value = marginal_costs
def get_marginal_costs_for_asset(asset_id):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
ci = asset.costInformation
if ci:
mc = ci.marginalCosts
if mc:
return mc.value
return None
def str2float(string):
"""Converts a string to a float; returns 0.0 when the conversion fails."""
try:
return float(string)
except (ValueError, TypeError):
return 0.0
def get_first_last_of_line(line):
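"""Returns the (lat, lon) tuples of the first and last point of an esdl.Line."""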
first = ()
last = ()
i = 0
for point in line.point:
if i == 0:
first = (point.lat, point.lon)
i+=1
last = (point.lat, point.lon)
return first, last
@executor.job
def call_process_energy_system(esh, filename=None, es_title=None, app_context=None, force_update_es_id=None, zoom=True):
process_energy_system(esh, filename, es_title, app_context, force_update_es_id, zoom)
# ---------------------------------------------------------------------------------------------------------------------
# React on commands from the browser (add, remove, ...)
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('command', namespace='/esdl')
def process_command(message):
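"""Handles 'command' messages from the browser (e.g. add_object, remove_object, add_note, remove_area,
get_asset_ports, connect_ports) and updates both the ESDL model and the UI accordingly."""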
logger.info('received: ' + message['cmd'])
if not valid_session():
send_alert("Session has timed out, please refresh")
return
#logger.debug (message)
#logger.debug (session)
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "command", message['cmd'], json.dumps(message), "", {})
active_es_id = get_session('active_es_id')
if active_es_id is None:
send_alert('Serious error: no active es id found. Please report')
return
esh = get_handler()
if esh is None:
logger.error('ERROR finding EnergySystemHandler, Session issue??')
area_bld_list = get_session_for_esid(active_es_id, 'area_bld_list')
es_edit = esh.get_energy_system(es_id=active_es_id)
# test to see if this should be moved down:
# session.modified = True
# logger.debug (get_handler().instance[0].area.name)
if message['cmd'] == 'add_object':
area_bld_id = message['area_bld_id']
asset_id = message['asset_id']
object_type = message['object']
asset_name = message['asset_name']
asset = None
shape = message['shape']
geometry = ESDLGeometry.create_ESDL_geometry(shape)
if object_type == 'Area':
if not isinstance(geometry, esdl.Polygon):
send_alert('Areas with geometries other than polygons are not supported')
else:
if isinstance(geometry, esdl.Polygon):
new_area = esdl.Area(id=asset_id, name=asset_name)
new_area.geometry = geometry
# Update drop down list with areas and buildings
add_area_to_area_bld_list(new_area, area_bld_id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
# Add area to the indicated area
if not ESDLEnergySystem.add_area_to_area(es_edit, new_area, area_bld_id):
send_alert('Can not add area to building')
# Send new area shapes to the browser
area_list = []
boundary_wgs = ESDLGeometry.create_boundary_from_geometry(geometry)
area_list.append(ESDLGeometry.create_geojson(new_area.id, new_area.name, [], boundary_wgs))
esh.add_object_to_dict(active_es_id, new_area)
emit('geojson', {"layer": "area_layer", "geojson": area_list})
else:
send_alert('Cannot add an area with a shape other than a Polygon')
else:
edr_asset_str = get_session('adding_edr_assets')
if edr_asset_str:
asset = ESDLAsset.load_asset_from_string(edr_asset_str)
# TODO: deepcopy does not work.
# asset = copy.deepcopy(edr_asset)
# Quick fix: session variable adding_edr_assets now contains ESDL string
class_ = type(asset)
object_type = class_.__name__
logger.debug(asset)
# Check if any IDs were 'accidentally' set in the EDR model template and replace them by a new unique ID.
# If no ID was set, don't assign a new one either.
for c in asset.eContents:
if c.eClass.findEStructuralFeature('id'):
if c.eGet('id'):
c.eSet('id', str(uuid.uuid4()))
else:
asset_drawing_mode = get_session('asset_drawing_mode')
if asset_drawing_mode == 'asset_from_measures':
asset_from_measure_id = get_session('asset_from_measure_id')
asset = AssetsToBeAdded.get_instance_of_measure_with_asset_id(es_edit, asset_from_measure_id)
atba = AssetsToBeAdded.get_instance()
atba.reduce_ui_asset_count(es_edit, asset_from_measure_id)
class_ = type(asset)
object_type = class_.__name__
else:
module = importlib.import_module('esdl.esdl')
class_ = getattr(module, object_type)
asset = class_()
if issubclass(class_, esdl.Potential):
potential = class_()
potential.id = asset_id
potential.name = asset_name
potential.geometry = geometry
add_to_building = False
if not ESDLAsset.add_object_to_area(es_edit, potential, area_bld_id):
ESDLAsset.add_object_to_building(es_edit, potential, area_bld_id)
add_to_building = True
potentials_to_be_added = []
if isinstance(geometry, esdl.Point):
potentials_to_be_added.append(
['point', 'potential', potential.name, potential.id, type(potential).__name__,
[geometry.lat, geometry.lon]])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(potential.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords)
potentials_to_be_added.append(
['polygon', 'potential', potential.name, potential.id, type(potential).__name__, coords])
if potentials_to_be_added:
emit('add_esdl_objects', {'es_id': es_edit.id, 'add_to_building': add_to_building,
'asset_pot_list': potentials_to_be_added, 'zoom': False})
esh.add_object_to_dict(active_es_id, potential)
else:
asset.id = asset_id
asset.name = asset_name
asset.geometry = geometry
if isinstance(geometry, esdl.Point):
port_loc = (shape['coordinates']['lat'], shape['coordinates']['lng'])
elif isinstance(geometry, esdl.Polygon):
port_loc = ESDLGeometry.calculate_polygon_center(geometry)
polygon_area = int(shape['polygon_area'])
if not isinstance(asset, esdl.AbstractBuilding):
if asset.surfaceArea:
if asset.power:
asset.power = asset.power * polygon_area / asset.surfaceArea
asset.surfaceArea = polygon_area
else:
asset.surfaceArea = polygon_area
if not isinstance(asset, esdl.AbstractBuilding):
# -------------------------------------------------------------------------------------------------------------
# Add assets with a polyline geometry and an InPort and an OutPort
# -------------------------------------------------------------------------------------------------------------
if object_type in ['ElectricityCable', 'Pipe']:
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
asset.port.append(inp)
outp = esdl.OutPort(id=str(uuid.uuid4()), name='Out')
asset.port.append(outp)
asset.length = float(shape['length']) if 'length' in shape else 0.0
logger.debug(message)
# automatically connect the conductor to the ports that have been clicked
if 'connect_ports' in message and message['connect_ports'] != '':
connect_ports_msg = message['connect_ports']
start_port = None
end_port = None
from_port1 = None
to_port1 = None
from_port2 = None
to_port2 = None
if 'asset_start_port' in connect_ports_msg:
asset_start_port = connect_ports_msg['asset_start_port']
start_port = esh.get_by_id(active_es_id, asset_start_port)
if 'asset_end_port' in connect_ports_msg:
asset_end_port = connect_ports_msg['asset_end_port']
end_port = esh.get_by_id(active_es_id, asset_end_port)
# cannot connect to same port type
if start_port is not None and end_port is not None and \
type(start_port) == type(end_port):
other_type = esdl.InPort.eClass.name if isinstance(start_port, esdl.OutPort) \
else esdl.OutPort.eClass.name
send_alert(
"Please connect the {} to an {}".format(object_type, other_type))
return
require_reversed = False # to indicate the coordinates of the line need reversal
if start_port is not None:
if isinstance(start_port, esdl.OutPort):
inp.connectedTo.append(start_port)
from_port1 = inp
to_port1 = start_port
elif isinstance(start_port, esdl.InPort):
outp.connectedTo.append(start_port)
from_port1 = outp
to_port1 = start_port
require_reversed = True
if end_port is not None:
if isinstance(end_port, esdl.InPort):
outp.connectedTo.append(end_port)
from_port2 = outp
to_port2 = end_port
elif isinstance(end_port, esdl.OutPort):
inp.connectedTo.append(end_port)
from_port2 = inp
to_port2 = end_port
require_reversed = True
if require_reversed:
line: esdl.Line = asset.geometry # reverse coordinate to change direction of line
point = list(line.point) # copy list
line.point.clear()
for p in point:
line.point.insert(0, p) # reverse list of coordinates
# Send connections
add_to_building = False # TODO: Fix using this inside buildings
conn_list = get_session_for_esid(active_es_id, 'conn_list')
carrier_id = None
if start_port:
if isinstance(start_port, esdl.InPort):
asset1_port_location = asset.geometry.point[-1]
else:
asset1_port_location = asset.geometry.point[0]
if start_port.carrier is not None:
carrier_id = start_port.carrier.id
inp.carrier = start_port.carrier
outp.carrier = start_port.carrier
if end_port is not None and end_port.carrier is None:
if isinstance(end_port.energyasset, esdl.Joint): # in case of a joint: set the carrier for all ports
for p in end_port.energyasset.port:
p.carrier = start_port.carrier if p.carrier is None else p.carrier
else:
end_port.carrier = start_port.carrier
if end_port:
if isinstance(end_port, esdl.InPort):
asset2_port_location = asset.geometry.point[-1]
else:
asset2_port_location = asset.geometry.point[0]
if end_port.carrier is not None and carrier_id is None: # no start_port carrier
carrier_id = end_port.carrier.id
inp.carrier = end_port.carrier
outp.carrier = end_port.carrier
if start_port is not None and start_port.carrier is None:
if isinstance(start_port.energyasset, esdl.Joint): # in case of a joint: set the carrier for all ports
for p in start_port.energyasset.port:
p.carrier = end_port.carrier if p.carrier is None else p.carrier
else:
start_port.carrier = end_port.carrier
# send messages to update connections and start port / end port marker colors based on
# the carriers
if start_port:
conn_message = {'from-port-id': from_port1.id,
'from-port-carrier': from_port1.carrier.id if from_port1.carrier else None,
'from-asset-id': from_port1.eContainer().id,
'from-asset-coord': [asset1_port_location.lat, asset1_port_location.lon],
'to-port-id': to_port1.id,
'to-port-carrier': to_port1.carrier.id if to_port1.carrier else None,
'to-asset-id': to_port1.eContainer().id,
'to-asset-coord': [asset1_port_location.lat, asset1_port_location.lon]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of from_port asset
from_asset = start_port.eContainer()
port_list = []
for p in from_asset.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': from_asset.id, 'ports': port_list})
if end_port:
conn_message = {'from-port-id': from_port2.id,
'from-port-carrier': from_port2.carrier.id if from_port2.carrier else None,
'from-asset-id': from_port2.eContainer().id,
'from-asset-coord': [asset2_port_location.lat, asset2_port_location.lon],
'to-port-id': to_port2.id,
'to-port-carrier': to_port2.carrier.id if to_port2.carrier else None,
'to-asset-id': to_port2.eContainer().id,
'to-asset-coord': [asset2_port_location.lat, asset2_port_location.lon]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of from_port asset
to_asset = end_port.eContainer()
port_list = []
for p in to_asset.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': to_asset.id, 'ports': port_list})
# -------------------------------------------------------------------------------------------------------------
# Add assets with an InPort and two OutPorts (either point or polygon)
# -------------------------------------------------------------------------------------------------------------
elif object_type in ['CHP', 'FuelCell']:
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
asset.port.append(inp)
e_outp = esdl.OutPort(id=str(uuid.uuid4()), name='E Out')
asset.port.append(e_outp)
h_outp = esdl.OutPort(id=str(uuid.uuid4()), name='H Out')
asset.port.append(h_outp)
else:
capability = ESDLAsset.get_asset_capability_type(asset)
# The view mode influences if single or double ports are added
double_line_mode = False
view_modes = ViewModes.get_instance()
if view_modes.get_user_settings(user_email)['mode'] == 'CHESS':
double_line_mode = True
if capability == 'Producer':
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
if double_line_mode:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
elif capability in ['Consumer', 'Storage']:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
if double_line_mode:
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
elif capability == 'Conversion':
if object_type == "HeatPump" and double_line_mode:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='PrimIn'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='PrimOut'))
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='SecIn'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='SecOut'))
else:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
elif capability == 'Transport':
if object_type == 'HeatExchange' or object_type == 'Transformer':
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='PrimIn'))
if double_line_mode:
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='PrimOut'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='SecOut'))
if double_line_mode:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='SecIn'))
else:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
else:
logger.error('Unknown asset capability {}'.format(capability))
else:
# Update drop down list with areas and buildings
add_bld_to_area_bld_list(asset, area_bld_id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
add_to_building = False
if not ESDLAsset.add_object_to_area(es_edit, asset, area_bld_id):
ESDLAsset.add_object_to_building(es_edit, asset, area_bld_id)
add_to_building = True
asset_to_be_added_list = []
buildings_to_be_added_list = []
# TODO: check / solve cable as Point issue?
if not isinstance(asset, esdl.AbstractBuilding):
port_list = []
ports = asset.port
for p in ports:
connTo_ids = list(o.id for o in p.connectedTo)
carrier_id = p.carrier.id if p.carrier else None
port_list.append(
{'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': connTo_ids,
'carrier': carrier_id})
if isinstance(asset, esdl.AbstractBuilding):
if isinstance(geometry, esdl.Point):
buildings_to_be_added_list.append(['point', asset.name, asset.id, type(asset).__name__,
[shape['coordinates']['lat'], shape['coordinates']['lng']],
False, {}])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(asset.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords) # --> [lat, lon]
boundary = ESDLGeometry.create_boundary_from_geometry(geometry)
buildings_to_be_added_list.append(['polygon', asset.name, asset.id, type(asset).__name__,
boundary["coordinates"], False, {}])
emit('add_building_objects', {'es_id': es_edit.id, 'building_list': buildings_to_be_added_list,
'zoom': False})
else:
capability_type = ESDLAsset.get_asset_capability_type(asset)
state = asset_state_to_ui(asset)
if isinstance(geometry, esdl.Point):
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'marker')
asset_to_be_added_list.append(['point', 'asset', asset.name, asset.id, type(asset).__name__,
[shape['coordinates']['lat'], shape['coordinates']['lng']],
tooltip_asset_attrs, state, port_list, capability_type])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(asset.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords) # --> [lat, lon]
# logger.debug(coords)
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'polygon')
asset_to_be_added_list.append(
['polygon', 'asset', asset.name, asset.id, type(asset).__name__, coords,
tooltip_asset_attrs, state, port_list, capability_type])
elif isinstance(geometry, esdl.Line):
coords = []
for point in geometry.point:
coords.append([point.lat, point.lon])
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'line')
asset_to_be_added_list.append(['line', 'asset', asset.name, asset.id, type(asset).__name__,
coords, tooltip_asset_attrs, state, port_list])
#logger.debug(asset_to_be_added_list)
emit('add_esdl_objects', {'es_id': es_edit.id, 'add_to_building': add_to_building,
'asset_pot_list': asset_to_be_added_list, 'zoom': False})
asset_list = get_session_for_esid(es_edit.id, 'asset_list')
for al_asset in asset_to_be_added_list:
asset_list.append(al_asset)
esh.add_object_to_dict(es_edit.id, asset)
if hasattr(asset, 'port'):
for added_port in asset.port:
esh.add_object_to_dict(es_edit.id, added_port)
set_handler(esh)
if message['cmd'] == 'remove_object':
# removes asset or potential from EnergySystem
obj_id = message['id']
if obj_id:
# asset = ESDLAsset.find_asset(es_edit.instance[0].area, obj_id)
# asset can also be any other object in ESDL
asset = esh.get_by_id(active_es_id, obj_id)
if isinstance(asset, esdl.AbstractBuilding):
# Update drop down list with areas and buildings
remove_ab_from_area_bld_list(asset.id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
if asset:
# Try to remove control strategy for EnergyAssets (and not for buildings)
if isinstance(asset, esdl.EnergyAsset):
remove_control_strategy_for_asset(asset.id)
ESDLAsset.remove_object_from_energysystem(es_edit, obj_id)
esh.remove_object_from_dict(es_edit.id, asset, True)
# remove from asset dict
asset_list = get_session_for_esid(active_es_id, 'asset_list')
asset_list[:] = [a for a in asset_list if a[3] != obj_id] # filter list in place
else:
send_alert('Asset or potential without an id cannot be removed')
if message['cmd'] == 'add_note':
id = message['id']
location = message['location']
author = message['author']
note = esdl.Note(id=id, author=author)
dt = parse_date(message['date'])
if dt:
note.date = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
point = esdl.Point(lat=location['lat'], lon=location['lng'])
note.mapLocation = point
esh.add_object_to_dict(es_edit.id, note)
esi = es_edit.energySystemInformation
if not esi:
esi = esdl.EnergySystemInformation(id=str(uuid.uuid4()))
es_edit.energySystemInformation = esi
esh.add_object_to_dict(es_edit.id, esi)
notes = esi.notes
if not notes:
notes = esdl.Notes(id=str(uuid.uuid4()))
esi.notes = notes
esh.add_object_to_dict(es_edit.id, notes)
notes.note.append(note)
esh.add_object_to_dict(es_edit.id, note)
if message['cmd'] == 'remove_area':
area_id = message['id']
if area_id:
top_area = es_edit.instance[0].area
if top_area:
if top_area.id == area_id:
send_alert('Can not remove top level area')
elif not ESDLEnergySystem.remove_area(top_area, area_id):
send_alert('Area could not be removed')
if message['cmd'] == 'get_asset_ports':
asset_id = message['id']
port_list = []
if asset_id:
asset = ESDLAsset.find_asset(es_edit.instance[0].area, asset_id)
ports = asset.port
for p in ports:
port_list.append({'id': p.id, 'type': type(p).__name__})
emit('portlist', port_list)
if message['cmd'] == 'connect_ports':
port1_id = message['port1id']
port2_id = message['port2id']
# still not optimal, but done to get rid of mapping, optimize later
asset_and_coord1 = get_asset_and_coord_from_port_id(esh, active_es_id, port1_id)
asset_and_coord2 = get_asset_and_coord_from_port_id(esh, active_es_id, port2_id)
asset1 = asset_and_coord1['asset']
asset2 = asset_and_coord2['asset']
asset1_port_location = asset_and_coord1['coord']
asset2_port_location = asset_and_coord2['coord']
port1 = None
port2 = None
for p in asset1.port:
if p.id == port1_id:
port1 = p
break
for p in asset2.port:
if p.id == port2_id:
port2 = p
break
if port1 and port2:
# add type check on ports
if type(port1).__name__ == type(port2).__name__:
send_alert('Cannot connect ports of the same type. One should be an InPort and one should be an OutPort')
else:
connect_ports(port1, port2)
add_to_building = False
if asset1.containingBuilding:
asset1_bld_id = asset1.containingBuilding.id
if asset2.containingBuilding:
if asset1.containingBuilding:
# assets both in buildings
if asset1_bld_id == asset2.containingBuilding.id:
# assets in same building
add_to_building = True
else:
# assets in different buildings
bld_asset1 = asset1.containingBuilding
asset1_port_location = (bld_asset1.geometry.lat, bld_asset1.geometry.lon)
bld_asset2 = asset2.containingBuilding
asset2_port_location = (bld_asset2.geometry.lat, bld_asset2.geometry.lon)
add_to_building = False
else:
# asset2 in building and asset1 not in building
bld_asset2 = asset2.containingBuilding
asset2_port_location = (bld_asset2.geometry.lat, bld_asset2.geometry.lon)
add_to_building = False
else:
# asset2 not in building
if asset1.containingBuilding:
# asset1 in building and asset2 not in building
bld_asset1 = asset1.containingBuilding
asset1_port_location = (bld_asset1.geometry.lat, bld_asset1.geometry.lon)
add_to_building = False
else:
# both assets not in building
add_to_building = False
# emit('add_new_conn', {'es_id': es_edit.id, 'add_to_building': add_to_building,
# 'from-port-id': port1_id, 'to-port-id': port2_id,
# 'new_conn': [[asset1_port_location[0], asset1_port_location[1]],
# [asset2_port_location[0], asset2_port_location[1]]]})
# propagate carrier: if only one of the two connected ports has a carrier, copy it to the other side (and to all ports of a Joint)
if not port2.carrier and port1.carrier:
if isinstance(port2.energyasset, esdl.Joint):
for p in port2.energyasset.port: # propagate carrier in case of a joint
p.carrier = port1.carrier if p.carrier is None else p.carrier
else:
port2.carrier = port1.carrier
elif port2.carrier and not port1.carrier:
if isinstance(port1.energyasset, esdl.Joint):
for p in port1.energyasset.port:  # propagate carrier in case of a joint
    p.carrier = port2.carrier if p.carrier is None else p.carrier
else:
port1.carrier = port2.carrier
p1_carr_id = port1.carrier.id if port1.carrier else None
p2_carr_id = port2.carrier.id if port2.carrier else None
conn_list = get_session_for_esid(active_es_id, 'conn_list')
conn_message = {'from-port-id': port1_id, 'from-port-carrier': p1_carr_id, 'from-asset-id': asset1.id,
'from-asset-coord': [asset1_port_location[0], asset1_port_location[1]],
'to-port-id': port2_id, 'to-port-carrier': p2_carr_id, 'to-asset-id': asset2.id,
'to-asset-coord': [asset2_port_location[0], asset2_port_location[1]]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of assets that are connected
port_list = []
for p in asset1.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': asset1.id, 'ports': port_list})
port_list = []
for p in asset2.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': asset2.id, 'ports': port_list})
else:
send_alert('Serious error connecting ports')
if message['cmd'] == 'get_object_info':
object_id = message['id']
asspot = message['asspot']
area = es_edit.instance[0].area
connected_to_info = []
ctrl_strategy = None
if asspot == 'asset':
# asset = ESDLAsset.find_asset(area, object_id)
asset = esh.get_by_id(es_edit.id, object_id)
logger.debug('Get info for asset ' + asset.id)
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
name = asset.name
if isinstance(asset, esdl.EnergyAsset):
connected_to_info = get_connected_to_info(asset)
if asset.controlStrategy:
ctrl_strategy = asset.controlStrategy.name
else:
ctrl_strategy = None
asset_class = 'EnergyAsset'
else:
asset_class = 'AbstractBuilding'
asset_doc = asset.__doc__
else:
pot = esh.get_by_id(es_edit.id, object_id)
logger.debug('Get info for potential ' + pot.id)
attrs_sorted = ESDLEcore.get_asset_attributes(pot, esdl_doc)
name = pot.name
connected_to_info = []
ctrl_strategy = None
asset_doc = pot.__doc__
if name is None: name = ''
emit('asset_info', {'id': object_id, 'name': name, 'class': asset_class, 'attrs': attrs_sorted, 'connected_to_info': connected_to_info, 'ctrl_strategy': ctrl_strategy, 'asset_doc': asset_doc})
if message['cmd'] == 'get_conductor_info':
asset_id = message['id']
latlng = message['latlng']
area = es_edit.instance[0].area
asset = ESDLAsset.find_asset(area, asset_id)
connected_to_info = get_connected_to_info(asset)
logger.debug('Get info for conductor ' + asset.id)
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
name = asset.name
if name is None: name = ''
asset_doc = asset.__doc__
emit('asset_info', {'id': asset_id, 'name': name, 'class': 'EnergyAsset', 'latlng': latlng, 'attrs': attrs_sorted, 'connected_to_info': connected_to_info, 'asset_doc': asset_doc})
if message['cmd'] == 'get_table_editor_info':
producer_info_list = []
consumer_info_list = []
transport_info_list = []
storage_info_list = []
conversion_info_list = []
energy_assets = esh.get_all_instances_of_type(esdl.EnergyAsset, active_es_id)
for asset in energy_assets:
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
connected_to_info = get_connected_to_info(asset)
strategy_info = get_control_strategy_info(asset)
profile_info = get_port_profile_info(asset)
mc_info = None
ci = asset.costInformation
if ci:
mc = ci.marginalCosts
if mc:
mc_info = mc.value
name = asset.name
if name is None: name = ''
asset_doc = asset.__doc__
asset_type = type(asset).__name__
asset_info = {
'id': asset.id,
'name': name,
'type': asset_type,
'attrs': attrs_sorted,
'connected_to_info': connected_to_info,
'control_strategy': strategy_info,
'marginal_costs': mc_info,
'profile_info': profile_info,
'asset_doc': asset_doc
}
if isinstance(asset, esdl.Producer):
producer_info_list.append(asset_info)
if isinstance(asset, esdl.Consumer):
consumer_info_list.append(asset_info)
if isinstance(asset, esdl.Transport):
transport_info_list.append(asset_info)
if isinstance(asset, esdl.Storage):
storage_info_list.append(asset_info)
if isinstance(asset, esdl.Conversion):
if not strategy_info:
logger.debug("================== NO CONTROL STRATEGY ===================")
conversion_info_list.append(asset_info)
# Sort arrays on asset_type
# attrs_sorted = sorted(attributes, key=lambda a: a['name'])
producer_info_list = sorted(producer_info_list, key=lambda a: (a['type'], a['name']))
consumer_info_list = sorted(consumer_info_list, key=lambda a: (a['type'], a['name']))
transport_info_list = sorted(transport_info_list, key=lambda a: (a['type'], a['name']))
storage_info_list = sorted(storage_info_list, key=lambda a: (a['type'], a['name']))
conversion_info_list = sorted(conversion_info_list, key=lambda a: (a['type'], a['name']))
emit('table_editor', {
'producer': producer_info_list,
'consumer': consumer_info_list,
'transport': transport_info_list,
'storage': storage_info_list,
'conversion': conversion_info_list
})
if message['cmd'] == 'set_asset_param':
if 'id' not in message or message['id'] is None:
fragment = message['fragment']
asset_id = None
else:
fragment = None
asset_id = message['id']
param_name = message['param_name']
param_value = message['param_value']
if asset_id is None:
resource = esh.get_resource(active_es_id)
assets = [resource.resolve(fragment)]
else:
if isinstance(asset_id, list):
assets = []
for ass_id in asset_id:
assets.append(esh.get_by_id(active_es_id, ass_id))
else:
assets = [esh.get_by_id(active_es_id, asset_id)]
for asset in assets:
logger.debug('Set param '+ param_name + ' for class ' + asset.eClass.name + ' to value '+ str(param_value))
try:
attribute = asset.eClass.findEStructuralFeature(param_name)
if attribute is not None:
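# Multi-valued ('many') attributes are replaced as a whole: clear the existing collection and append each parsed value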
if attribute.many:
#length = len(param_value)
eCollection = asset.eGet(param_name)
eCollection.clear() # TODO no support for multi-select of enums
print('after clear', eCollection)
if not isinstance(param_value, list):
param_value = [param_value]
for item in param_value:
parsed_value = attribute.eType.from_string(item)
eCollection.append(parsed_value)
else:
if param_value == "" or param_value is None:
parsed_value = attribute.eType.default_value
else:
parsed_value = attribute.eType.from_string(param_value)
if attribute.name == 'id':
esh.remove_object_from_dict(active_es_id, asset)
asset.eSet(param_name, parsed_value)
esh.add_object_to_dict(active_es_id, asset)
else:
asset.eSet(param_name, parsed_value)
else:
send_alert('Error setting attribute {} of {} to {}, unknown attribute'.format(param_name, asset.name, param_value))
except Exception as e:
logger.error('Error setting attribute {} of {} to {}, caused by {}'.format(param_name, asset.name, param_value, str(e)))
send_alert('Error setting attribute {} of {} to {}, caused by {}'.format(param_name, asset.name, param_value, str(e)))
# update gui, only if necessary for EnergyAssets, and Ports
# and EnergySystem ans
# update_gui = False
# update_asset = asset
# if isinstance(asset, esdl.EnergySystem):
# #emit()
# # todo find out how to update energy system name and update Area name in dropdown
# pass
# elif isinstance(asset, esdl.EnergyAsset):
# if param_name == esdl.EnergyAsset.name.name:
# update_gui = True
# if param_name == esdl.EnergyAsset.state.name:
# update_gui = True
# elif isinstance(asset, esdl.Port):
# update_gui = True
# update_asset = asset.energyasset
#
# if update_gui:
# emit('delete_esdl_object', {'asset_id': update_asset.id})
# asset_ui, conn_list = energy_asset_to_ui(esh, active_es_id, update_asset)
# emit("add_esdl_objects",
# {
# "es_id": active_es_id,
# "asset_pot_list": [asset_ui],
# "zoom": False,
# })
# emit("add_connections",{"es_id": active_es_id, "conn_list": conn_list})
if message['cmd'] == 'set_area_bld_polygon':
area_bld_id = message['area_bld_id']
polygon_data = message['polygon']
polygon = esdl.Polygon()
exterior = esdl.SubPolygon()
polygon.exterior = exterior
i = 0
prev_lat = 0
prev_lng = 0
while i < len(polygon_data[0]):
coord = polygon_data[0][i]
if i == 0:
first = (coord['lat'], coord['lng'])
if i == len(polygon_data[0]) - 1:
last = (coord['lat'], coord['lng'])
# Don't understand why, but sometimes coordinates come in twice
if prev_lat != coord['lat'] or prev_lng != coord['lng']:
point = esdl.Point(lat=coord['lat'], lon=coord['lng'])
exterior.point.append(point)
prev_lat = coord['lat']
prev_lng = coord['lng']
i += 1
area = es_edit.instance[0].area
area_selected = ESDLEnergySystem.find_area(area, area_bld_id)
if area_selected:
area_selected.geometry = polygon
else:
bld_selected = ESDLAsset.find_asset(area, area_bld_id)
if bld_selected:
bld_selected.geometry = polygon
else:
send_alert('SERIOUS ERROR: set_area_bld_polygon - cannot find area or building')
if message['cmd'] == 'split_conductor':
cond_id = message['id']
mode = message['mode'] # connect, add_joint, no_connect
location_to_split = message['location']
area = es_edit.instance[0].area
conductor, container = ESDLAsset.find_asset_and_container(area, cond_id)
split_conductor(conductor, location_to_split, mode, container)
if message['cmd'] == 'get_port_profile_info':
port_id = message['port_id']
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
profile = p.profile
if profile:
profile_info_list = generate_profile_info(profile)
emit('port_profile_info', {'port_id': port_id, 'profile_info': profile_info_list})
else:
emit('port_profile_info', {'port_id': port_id, 'profile_info': []})
if message['cmd'] == 'add_profile_to_port':
port_id = message['port_id']
profile_class = message['profile_class']
quap_type = message["qaup_type"]
if profile_class == 'SingleValue':
value = message['value']
esdl_profile = esdl.SingleValue()
esdl_profile.value = str2float(value)
elif profile_class == 'DateTimeProfile':
esdl_profile = esdl.DateTimeProfile()
# TODO: Determine how to deal with DateTimeProfiles in the UI
else:
# Assume all other options are InfluxDBProfiles
multiplier = message['multiplier']
profiles = Profiles.get_instance().get_profiles()['profiles']
for pkey in profiles:
p = profiles[pkey]
if p['profile_uiname'] == profile_class:
esdl_profile = esdl.InfluxDBProfile()
esdl_profile.multiplier = str2float(multiplier)
esdl_profile.measurement = p['measurement']
esdl_profile.field = p['field']
if 'host' in p and p['host']:
esdl_profile.host = p['host']
if 'port' in p and p['port']:
esdl_profile.port = int(p['port'])
else:
esdl_profile.host = settings.profile_database_config['protocol'] + "://" + \
settings.profile_database_config['host']
esdl_profile.port = int(settings.profile_database_config['port'])
esdl_profile.database = p['database']
esdl_profile.filters = settings.profile_database_config['filters']
if 'start_datetime' in p:
dt = parse_date(p['start_datetime'])
if dt:
esdl_profile.startDate = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
if 'end_datetime' in p:
dt = parse_date(p['end_datetime'])
if dt:
esdl_profile.endDate = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
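# Attach quantity-and-unit information to the profile: a reference to a predefined QAU, a newly built custom QAU, or just a profile type enum value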
if quap_type == 'predefined_qau':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, predefined_qau: predefined_qau});
predefined_qau = message["predefined_qau"]
for pqau in esdl_config.esdl_config['predefined_quantity_and_units']:
if pqau['id'] == predefined_qau:
try:
# check if predefined qau is already present in the ESDL
qau = esh.get_by_id(active_es_id, predefined_qau)
except KeyError:
qau = ESDLQuantityAndUnits.build_qau_from_dict(pqau)
esi_qau = ESDLQuantityAndUnits.get_or_create_esi_qau(esh, active_es_id)
esi_qau.quantityAndUnit.append(qau)
esh.add_object_to_dict(active_es_id, qau)
#qau.id = str(uuid.uuid4()) # generate new id for predefined qau otherwise double ids appear
break
# make a reference instead of a direct link
qau_ref = esdl.QuantityAndUnitReference(reference=qau)
esdl_profile.profileQuantityAndUnit = qau_ref
elif quap_type == 'custom_qau':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, custom_qau: custom_qau});
custom_qau = message["custom_qau"]
qau = ESDLQuantityAndUnits.build_qau_from_dict(custom_qau)
esdl_profile.profileQuantityAndUnit = qau
elif quap_type == 'profiletype':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, profile_type: profile_type});
profile_type = message['profile_type']
esdl_profile.profileType = esdl.ProfileTypeEnum.from_string(profile_type)
esdl_profile.id = str(uuid.uuid4())
esh.add_object_to_dict(es_edit.id, esdl_profile)
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
# p.profile = esdl_profile
ESDLAsset.add_profile_to_port(p, esdl_profile)
if message['cmd'] == 'remove_profile_from_port':
port_id = message['port_id']
profile_id = message['profile_id']
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
# p.profile = esdl_profile
ESDLAsset.remove_profile_from_port(p, profile_id)
if message['cmd'] == 'add_port' or message['cmd'] == 'add_port_with_id':
# merge add_port and add_port_with_id. Why on earth were there two messages for the same thing!
# frontend should be adapted to only send one of these: todo
# ptype and direction do the same thing!
asset_id = message['asset_id']
pname = message['pname']
pid = str(uuid.uuid4())
if 'pid' in message:
pid = message['pid']
if 'ptype' in message:
ptype = message['ptype']
if 'direction' in message:
direction = message['direction']
ptype = 'InPort' if direction == 'in' else 'OutPort'
asset = esh.get_by_id(es_edit.id, asset_id)
if ptype == 'InPort':
port = esdl.InPort(id=pid, name=pname)
else:
port = esdl.OutPort(id=pid, name=pname)
geom = asset.geometry
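# Guard rails for the map visualisation: at most 6 ports per asset, and a Line geometry gets at most one InPort and one OutPort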
if len(asset.port) >= 6:
send_alert('ERROR: MapEditor cannot visualize assets with more than 6 ports.')
if isinstance(geom, esdl.Line) and len(asset.port) >= 2:
send_alert('ERROR: Line geometries cannot have more than two ports.')
elif isinstance(geom, esdl.Line) and len(asset.port) == 1 and asset.port[0].eClass.name == ptype:
send_alert('ERROR: Line cannot have ports of the same type.')
else:
if isinstance(geom, esdl.Line) and isinstance(port, esdl.InPort):
asset.port.insert(0, port) # insert InPort always at beginning as this is the convention
else:
asset.port.append(port)
esh.add_object_to_dict(active_es_id, port)
port_list = []
for p in asset.port:
port_list.append(
{'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': [pt.id for pt in p.connectedTo]})
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
if message['cmd'] == 'remove_port':
pid = message['port_id']
asset = get_asset_from_port_id(esh, active_es_id, pid)
ports = asset.port
port_list = []
for p in set(ports):
if p.id == pid:
esh.remove_object_from_dict(active_es_id, p, recursive=True)
ports.remove(p) # remove from list
p.delete() # delete from esdl (e.g. if other ports refer to this port, they will be updated)
# question is why is this necessary in pyecore and isn't this done automatically
# as p is not contained anymore and you get dangling references.
else:
carrier_id = p.carrier.id if p.carrier else None
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': [pt.id for pt in p.connectedTo], 'carrier': carrier_id})
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
if message['cmd'] == 'remove_connection_portids':
from_port_id = message['from_port_id']
from_port = esh.get_by_id(es_edit.id, from_port_id)
to_port_id = message['to_port_id']
to_port = esh.get_by_id(es_edit.id, to_port_id)
from_port.connectedTo.remove(to_port)
from_asset_id = from_port.eContainer().id
to_asset_id = to_port.eContainer().id
# refresh connections in gui
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
new_list = []
#print(conn_list)
for conn in conn_list:
if (conn['from-port-id'] != from_port_id or conn['from-asset-id'] != from_asset_id or
conn['to-port-id'] != to_port_id or conn['to-asset-id'] != to_asset_id) and \
(conn['from-port-id'] != to_port_id or conn['from-asset-id'] != to_asset_id or
conn['to-port-id'] != from_port_id or conn['to-asset-id'] != from_asset_id):
# Remove both directions from -> to and to -> from as we don't know how they are stored in the list
# does not matter, as a connection is unique
new_list.append(conn) # add connections that we are not interested in
else:
print(' - removed {}'.format(conn))
set_session_for_esid(active_es_id, 'conn_list', new_list) # set new connection list
# TODO: send es.id with this message?
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': new_list})
if message['cmd'] == 'remove_connection':
# socket.emit('command', {cmd: 'remove_connection', from_asset_id: from_asset_id, from_port_id: from_port_id,
# to_asset_id: to_asset_id, to_port_id: to_port_id});
from_asset_id = message['from_asset_id']
from_port_id = message['from_port_id']
from_port = esh.get_by_id(es_edit.id, from_port_id)
to_asset_id = message['to_asset_id']
to_port_id = message['to_port_id']
to_port = esh.get_by_id(es_edit.id, to_port_id)
from_port.connectedTo.remove(to_port)
# refresh connections in gui
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
new_list = []
#print(conn_list)
for conn in conn_list:
if (conn['from-port-id'] != from_port_id or conn['from-asset-id'] != from_asset_id or \
conn['to-port-id'] != to_port_id or conn['to-asset-id'] != to_asset_id) and \
(conn['from-port-id'] != to_port_id or conn['from-asset-id'] != to_asset_id or \
conn['to-port-id'] != from_port_id or conn['to-asset-id'] != from_asset_id):
# Remove both directions from -> to and to -> from as we don't know how they are stored in the list
# does not matter, as a connection is unique
new_list.append(conn) # add connections that we are not interested in
else:
print(' - removed {}'.format(conn))
set_session_for_esid(active_es_id, 'conn_list', new_list) # set new connection list
# TODO: send es.id with this message?
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': new_list})
if message['cmd'] == 'set_carrier':
asset_id = message['asset_id']
carrier_id = message['carrier_id']
area = es_edit.instance[0].area
if asset_id:
asset = ESDLAsset.find_asset(area, asset_id)
num_ports = len(asset.port)
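# Carriers can only be set starting from transport assets (or single-port assets); from there the carrier is propagated over the connected transport network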
if isinstance(asset, esdl.Transport) or num_ports == 1:
set_carrier_for_connected_transport_assets(asset_id, carrier_id)
else:
send_alert("Error: Can only start setting carriers from transport assets or assets with only one port")
update_carrier_conn_list()
if message['cmd'] == 'add_carrier':
# en_carr: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, emission: carr_emission, encont: carr_encont, encunit: carr_encunit});
# el_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, voltage: carr_voltage});
# g_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, pressure: carr_pressure});
# h_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, suptemp: carr_suptemp, rettemp: carr_rettemp});
# en_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name});
carr_type = message['type']
carr_name = message['name']
carr_id = str(uuid.uuid4())
if carr_type == 'en_carr':
carr_emission = message['emission']
carr_encont = message['encont']
carr_encunit = message['encunit'] # MJpkg MJpNm3 MJpMJ
carr_sofm = message['sofm']
carr_rentype = message['rentype']
carrier = esdl.EnergyCarrier(id = carr_id, name = carr_name, emission = str2float(carr_emission),
energyContent = str2float(carr_encont), energyCarrierType = carr_rentype, stateOfMatter = carr_sofm)
if carr_encunit == 'MJpkg':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perMultiplier=esdl.MultiplierEnum.KILO,
perUnit=esdl.UnitEnum.GRAM)
elif carr_encunit == 'MJpNm3':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perUnit=esdl.UnitEnum.CUBIC_METRE)
elif carr_encunit == 'MJpMJ':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perMultiplier=esdl.MultiplierEnum.MEGA,
perUnit=esdl.UnitEnum.JOULE)
emission_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.EMISSION,
multiplier=esdl.MultiplierEnum.KILO,
unit=esdl.UnitEnum.GRAM,
perMultiplier=esdl.MultiplierEnum.GIGA,
perUnit=esdl.UnitEnum.JOULE)
carrier.energyContentUnit = encont_qandu
carrier.emissionUnit = emission_qandu
if carr_type == 'el_comm':
carr_voltage = message['voltage']
carrier = esdl.ElectricityCommodity(id=carr_id, name=carr_name, voltage=str2float(carr_voltage))
if carr_type == 'g_comm':
carr_pressure = message['pressure']
carrier = esdl.GasCommodity(id=carr_id, name=carr_name, pressure=str2float(carr_pressure))
if carr_type == 'h_comm':
carr_suptemp = message['suptemp']
carr_rettemp = message['rettemp']
carrier = esdl.HeatCommodity(id=carr_id, name=carr_name, supplyTemperature=str2float(carr_suptemp), returnTemperature=str2float(carr_rettemp))
if carr_type == 'en_comm':
carrier = esdl.EnergyCarrier(id=carr_id, name=carr_name)
esh.add_object_to_dict(es_edit.id, carrier) # add carrier to ID list for easy retrieval
esi = es_edit.energySystemInformation
if not esi:
esi_id = str(uuid.uuid4())
esi = esdl.EnergySystemInformation()
esi.id = esi_id
es_edit.energySystemInformation = esi
esh.add_object_to_dict(es_edit.id, esi)
ecs = esi.carriers
if not ecs:
ecs_id = str(uuid.uuid4())
ecs = esdl.Carriers(id=ecs_id)
esi.carriers = ecs
esh.add_object_to_dict(es_edit.id, ecs)
ecs.carrier.append(carrier)
carrier_list = ESDLEnergySystem.get_carrier_list(es_edit)
emit('carrier_list', {'es_id': es_edit.id, 'carrier_list': carrier_list})
return True
if message['cmd'] == 'remove_carrier':
carrier_id = message['carrier_id']
carrier = esh.get_by_id(es_edit.id, carrier_id)
carrier.delete()
conn_list = get_session_for_esid(es_edit.id, 'conn_list')
for c in conn_list:
if c['from-port-carrier'] == carrier_id:
c['from-port-carrier'] = None
if c['to-port-carrier'] == carrier_id:
c['to-port-carrier'] = None
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': es_edit.id, 'conn_list': conn_list})
if message['cmd'] == 'get_storage_strategy_info':
asset_id = message['asset_id']
mcc, mdc = get_storage_marginal_costs(asset_id)
emit('storage_strategy_window', {'asset_id': asset_id, 'mcc': mcc, 'mdc': mdc})
if message['cmd'] == 'get_curtailment_strategy_info':
asset_id = message['asset_id']
max_power = get_curtailment_max_power(asset_id)
emit('curtailment_strategy_window', {'asset_id': asset_id, 'max_power': max_power})
if message['cmd'] == 'set_control_strategy':
# socket.emit('command', {'cmd': 'set_control_strategy', 'strategy': control_strategy, 'asset_id': asset_id, 'port_id': port_id});
strategy = message['strategy']
asset_id = message['asset_id']
if strategy == 'StorageStrategy':
mcc = message['marg_ch_costs']
mdc = message['marg_disch_costs']
add_storage_control_strategy_for_asset(asset_id, mcc, mdc)
elif strategy == 'CurtailmentStrategy':
max_power = message['max_power']
add_curtailment_control_strategy_for_asset(asset_id, max_power)
else:
port_id = message['port_id']
add_drivenby_control_strategy_for_asset(asset_id, strategy, port_id)
if message['cmd'] == 'remove_control_strategy':
asset_id = message['asset_id']
remove_control_strategy_for_asset(asset_id)
if message['cmd'] == 'set_marginal_costs_get_info':
asset_id = message['asset_id']
mc = get_marginal_costs_for_asset(asset_id)
emit('marginal_costs', {'asset_id': asset_id, 'mc': mc})
if message['cmd'] == 'set_marg_costs':
asset_id = message['asset_id']
mc = str2float(message['marg_costs'])
set_marginal_costs_for_asset(asset_id, mc)
if message['cmd'] == 'layer':
pass
if message['cmd'] == 'run_ESSIM_simulation':
logger.debug('ESSIM simulation command received')
sim_descr = message['sim_description']
sim_start_datetime = message['sim_start_datetime']
sim_end_datetime = message['sim_end_datetime']
essim_kpis = message['essim_kpis']
essim_loadflow = message['essim_loadflow']
# Create the HTTP POST to start the simulation
if not essim.run_simulation(sim_descr, sim_start_datetime, sim_end_datetime, essim_kpis, essim_loadflow):
emit('simulation_not_started')
if message['cmd'] == 'validate_for_ESSIM':
logger.debug('validation for ESSIM command received')
res = validate_ESSIM(es_edit)
emit('results_validation_for_ESSIM', res)
# if message['cmd'] == 'calculate_ESSIM_KPIs':
# session['simulationRun'] = '5d10f273783bac5eff4575e8'
# ESSIM_config = settings.essim_config
#
# simulation_run = get_session('simulationRun')
# if simulation_run:
#
# active_simulation = get_session('active_simulation')
# if active_simulation:
# sdt = datetime.strptime(active_simulation['startDate'], '%Y-%m-%dT%H:%M:%S%z')
# edt = datetime.strptime(active_simulation['endDate'], '%Y-%m-%dT%H:%M:%S%z')
# else:
# send_alert('No active_simulation! This should not happen, please report. However, you can continue')
# sdt = datetime.strptime(ESSIM_config['start_datetime'], '%Y-%m-%dT%H:%M:%S%z')
# edt = datetime.strptime(ESSIM_config['end_datetime'], '%Y-%m-%dT%H:%M:%S%z')
#
# influxdb_startdate = sdt.strftime('%Y-%m-%dT%H:%M:%SZ')
# influxdb_enddate = edt.strftime('%Y-%m-%dT%H:%M:%SZ')
#
# calc_ESSIM_KPIs.submit(es_edit, simulation_run, influxdb_startdate, influxdb_enddate)
# else:
# send_alert('No simulation id defined - run an ESSIM simulation first')
if message['cmd'] == 'add_layer':
id = message['id']
descr = message['descr']
url = message['url']
name = message['name']
setting_type = message['setting_type']
project_name = message['project_name']
legend_url = message['legend_url']
visible = message['visible']
layer = {
"description": descr,
"url": url,
"layer_name": name,
"setting_type": setting_type,
"project_name": project_name,
"legend_url": legend_url,
"layer_ref": None,
"visible": visible
}
wms_layers.add_wms_layer(id, layer)
if message['cmd'] == 'remove_layer':
id = message['id']
wms_layers.remove_wms_layer(id)
if message['cmd'] == 'get_es_info':
attributes = [
{"id": 1, "name": "Energysystem name", "value": es_edit.name},
{"id": 2, "name": "Energysystem description", "value": es_edit.description}
]
emit('show_es_info', attributes)
if message['cmd'] == 'set_es_info_param':
id = message['id']
value = message['value']
if id == "1":
es_edit.name = value
if id == "2":
es_edit.description = value
if message['cmd'] == 'add_sector':
name = message['name']
descr = message['descr']
code = message['code']
ESDLEnergySystem.add_sector(es_edit, name, code, descr)
sector_list = ESDLEnergySystem.get_sector_list(es_edit)
emit('sector_list', {'es_id': es_edit.id, 'sector_list': sector_list})
if message['cmd'] == 'remove_sector':
id = message['id']
esh = get_handler()
ESDLEnergySystem.remove_sector(es_edit, id)
sector_list = ESDLEnergySystem.get_sector_list(es_edit)
emit('sector_list', {'es_id': es_edit.id, 'sector_list': sector_list})
if message['cmd'] == 'set_sector':
asset_id = message['asset_id']
sector_id = message['sector_id']
instance = es_edit.instance
area = instance[0].area
asset = ESDLAsset.find_asset(area, asset_id)
esi = es_edit.energySystemInformation
sectors = esi.sectors
sector = sectors.sector
for s in sector:
if s.id == sector_id:
asset.sector = s
if message['cmd'] == 'get_edr_asset':
edr_asset_id = message['edr_asset_id']
edr_asset_str = edr_assets.get_asset_from_EDR(edr_asset_id)
if edr_asset_str:
edr_asset = ESDLAsset.load_asset_from_string(edr_asset_str)
edr_asset_name = edr_asset.name
edr_asset_type = type(edr_asset).__name__
edr_asset_cap = get_asset_capability_type(edr_asset)
emit('place_edr_asset', edr_asset_type)
set_session('adding_edr_assets', edr_asset_str)
recently_used_edr_assets = get_session('recently_used_edr_assets')
if recently_used_edr_assets:
current_edr_asset_in_list = False
for edra in recently_used_edr_assets:
if edra['edr_asset_id'] == edr_asset_id:
current_edr_asset_in_list = True
if not current_edr_asset_in_list and len(recently_used_edr_assets) == 5:
recently_used_edr_assets.pop() # Remove last element
if not current_edr_asset_in_list:
recently_used_edr_assets.insert(0, {
'edr_asset_id': edr_asset_id,
'edr_asset_name': edr_asset_name,
'edr_asset_type': edr_asset_type,
'edr_asset_cap': edr_asset_cap,
'edr_asset_str': edr_asset_str
})
else:
recently_used_edr_assets = list()
recently_used_edr_assets.append({
'edr_asset_id': edr_asset_id,
'edr_asset_name': edr_asset_name,
'edr_asset_type': edr_asset_type,
'edr_asset_cap': edr_asset_cap,
'edr_asset_str': edr_asset_str
})
set_session('recently_used_edr_assets', recently_used_edr_assets)
emit('recently_used_edr_assets', recently_used_edr_assets)
else:
send_alert('Error getting ESDL model from EDR')
if message['cmd'] == 'set_asset_drawing_mode':
mode = message['mode']
set_session('asset_drawing_mode', mode)
if mode == 'empty_assets':
set_session('adding_edr_assets', None)
set_session('asset_from_measure_id', None)
if mode == 'edr_asset':
edr_asset_info = message['edr_asset_info']
# If you select an asset from the EDR directly, the ESDL string is cached.
# EDR assets stored in mongo by the AssetDrawToolbar do not have the ESDL string stored.
if 'edr_asset_str' not in edr_asset_info:
edr_asset_id = edr_asset_info['edr_asset_id']
edr_asset_info['edr_asset_str'] = edr_assets.get_asset_from_EDR(edr_asset_id)
set_session('adding_edr_assets', edr_asset_info['edr_asset_str'])
if mode == 'asset_from_measures':
asset_from_measure_id = message['asset_from_measure_id']
set_session('asset_from_measure_id', asset_from_measure_id)
if message['cmd'] == 'query_esdl_service':
params = message['params']
logger.debug("received query_esdl_service command with params: {}".format(params))
query_esdl_services.submit(params)
if message['cmd'] == 'redraw_connections': # set_carrier_color
# this is called when a carrier color is changed and the gui needs to be refreshed
# best would be to do this fully in the front end (no changes in the ESDL model)
# but that does not contain enough information yet to do this.
conn_list = get_session_for_esid(active_es_id, 'conn_list')
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
asset_list = get_session_for_esid(active_es_id, 'asset_list')
emit('clear_ui', {'layer': 'assets'}) # clear current active layer assets
emit('add_esdl_objects', {'es_id': active_es_id, 'asset_pot_list': asset_list, 'zoom': False})
if message['cmd'] == 'building_editor':
bld_id = message['id']
building = esh.get_by_id(active_es_id, bld_id)
bld_info = get_building_information(building)
emit('building_information', bld_info)
emit('add_esdl_objects',
{'es_id': active_es_id, 'add_to_building': True, 'asset_pot_list': bld_info["asset_list"],
'zoom': False})
emit('add_connections', {'es_id': active_es_id, 'add_to_building': True, 'conn_list': bld_info["conn_list"]})
if message['cmd'] == 'accept_received_esdl':
user_email = get_session('user-email')
received_esdls = esdl_api.get_esdl_for_user(user_email)
if received_esdls:
for received_esdl in received_esdls:
filename = 'ESDL from '+received_esdl['sender']
esh = get_handler()
try:
result, parse_info = esh.add_from_string(name=filename, esdl_string=urllib.parse.unquote(received_esdl['esdl']))
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
call_process_energy_system.submit(esh, filename) # run in separate thread
esdl_api.remove_esdls_for_user(user_email)
except Exception as e:
logger.error("Error loading {}: {}".format(filename, e))
send_alert('Error interpreting ESDL from file - Exception: ' + str(e))
if message['cmd'] == 'rename_energysystem':
name = message['name']
rename_es_id = message['remame_es_id']
es_rename = esh.get_energy_system(es_id=rename_es_id)
es_rename.name = name
if message['cmd'] == 'remove_energysystem':
remove_es_id = message['remove_es_id']
esh.remove_energy_system(es_id=remove_es_id)
if message['cmd'] == 'refresh_esdl':
print('refresh_esdl')
esh = get_handler()
call_process_energy_system.submit(esh, force_update_es_id=es_edit.id, zoom=False) # run in separate thread
set_handler(esh)
session.modified = True
@executor.job
def query_esdl_services(params):
esh = get_handler()
logger.debug('calling service')
try:
esdl_service_ok, esdl_service_result = esdl_services.call_esdl_service(params)
except Exception as exc:
logger.exception("Exception when querying ESDL service")
esdl_service_ok = False
esdl_service_result = str(exc)
logger.debug('emitting result to browser')
if esdl_service_ok:
if esdl_service_result is not None:
emit('esdl_service_result', esdl_service_result)
else:
message = 'Error calling service'
if isinstance(esdl_service_result, str):
message += ': ' + esdl_service_result
send_alert(message)
# logger.debug('processing energy system')
call_process_energy_system.submit(esh)
@socketio.on('set_active_es_id', namespace='/esdl')
def set_active_es_id(id):
set_session('active_es_id', id)
logger.debug("========== Setting active es_id to {} =============".format(id))
# ---------------------------------------------------------------------------------------------------------------------
# React on commands from the browser (add, remove, ...)
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('file_command', namespace='/esdl')
def process_file_command(message):
logger.info('received: ' + message['cmd'])
es_info_list = get_session("es_info_list")
if message['cmd'] == 'new_esdl':
name = message['name']
description = message['description']
email = message['email']
top_area_name = message['top_area_name']
if top_area_name == '': top_area_name = 'Untitled area'
if name == '': name = 'New Energy System'
filename = 'Unknown'
esh = EnergySystemHandler()
es = esh.create_empty_energy_system(name, description, 'Untitled instance', top_area_name, esdlVersion=esdl_doc.get_esdl_version())
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, filename)
del_session('store_item_metadata')
emit('store_item_metadata', {})
set_session('active_es_id', es.id)
set_session('es_filename', filename)
set_session('es_email', email)
if message['cmd'] == 'load_esdl_from_file':
file_content = message['file_content']
filename = message['filename']
esh = EnergySystemHandler()
try:
result, parse_info = esh.load_from_string(esdl_string=file_content, name=filename)
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
except Exception as e:
logger.exception(f"Error opening {filename}")
send_alert("Error opening {}. Exception is: {}".format(filename, e))
emit('clear_ui')
return
es = esh.get_energy_system()
set_handler(esh)
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, filename) # run in separate thread
#thread = threading.Thread(target=process_energy_system, args=(esh, None, None, current_app._get_current_object() ))
#thread.start()
del_session('store_item_metadata')
emit('store_item_metadata', {})
set_session('active_es_id', es.id)
set_session('es_filename', filename)
if message['cmd'] == 'import_esdl_from_file':
file_content = message['file_content']
filename = message['filename']
esh = get_handler()
try:
imported_es, parse_info = esh.add_from_string(name=filename, esdl_string=file_content)
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
call_process_energy_system.submit(esh, filename) # run in separate thread
set_session('active_es_id', imported_es.id)
set_session('es_filename', filename)
except Exception as e:
logger.error("Error loading {}: {}".format(filename, e))
send_alert('Error interpreting ESDL from file - Exception: ' + str(e))
if message['cmd'] == 'get_list_from_store':
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url + 'tagged?tag=map&take=1000'
else:
store_url = default_store_url+ 'tagged?tag=map&take=1000'
try:
result = requests.get(store_url)
except Exception as e:
logger.error('Error accessing ESDL store: ' + str(e))
send_alert('Error accessing ESDL store: ' + str(e))
return
data = result.json()
store_list = []
for store_item in data:
store_list.append({'id': store_item['id'], 'title': store_item['title']})
sorted_store_list = sorted(store_list, key=lambda x: x['title'], reverse=False)
emit('store_list', sorted_store_list)
if message['cmd'] == 'load_esdl_from_store':
store_id = message['id']
esh = load_ESDL_EnergySystem(store_id)
if esh:
es = esh.get_energy_system()
if es.name:
title = 'Store name: ' + es.name + ', store id: ' + store_id
else:
title = 'Store id: ' + store_id
set_session('active_es_id', es.id)
set_session('es_filename', title) # TODO: separate filename and title
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, None, title)
else:
send_alert('Error loading ESDL file with id {} from store'.format(store_id))
if message['cmd'] == 'import_esdl_from_store':
store_id = message['id']
imported_es = import_ESDL_EnergySystem(store_id)
if imported_es:
if imported_es.name:
title = 'Store name: ' + imported_es.name + ', store id: ' + store_id
else:
title = 'Store id: ' + store_id
esh = get_handler()
call_process_energy_system.submit(esh, None, title) # run in separate thread
set_session('active_es_id', imported_es.id)
set_session('es_filename', title)
if message['cmd'] == 'store_esdl':
title = message['store_title']
descr = message['store_descr']
email = message['store_email']
tags = ['map']
esh = get_handler()
store_item_metadata = get_session('store_item_metadata')
if store_item_metadata:
store_id = store_item_metadata['id']
update_store_item(store_id, title, descr, email, tags, esh)
else:
store_id = get_session('active_es_id')
create_new_store_item(store_id, title, descr, email, tags, esh)
# Do not store file_content in logging database
if 'file_content' in message:
del message['file_content']
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "file-command", message['cmd'], json.dumps(message), "", {})
# if message['cmd'] == 'save_esdl':
# esh = get_handler()
# try:
# write_energysystem_to_file('./static/EnergySystem.esdl', esh)
# # TODO: do we need to flush??
# emit('and_now_press_download_file')
# except Exception as e:
# send_alert('Error saving ESDL file to filesystem - exception: '+str(e))
# if message['cmd'] == 'download_esdl':
# esh = get_handler()
# name = get_session('es_title').replace(' ', '_')
#
# send_ESDL_as_file(esh, name)
# ---------------------------------------------------------------------------------------------------------------------
# Connect from browser
# - initialize energysystem information
# - send info to browser
# ---------------------------------------------------------------------------------------------------------------------
def initialize_app():
session.permanent = True
logger.info('Client connected: {}'.format(request.sid))
if 'client_id' in session:
logger.info('Energysystem in memory - reloading client data')
esh = get_handler()
else:
logger.info('No energysystem in memory - generating empty energysystem')
esh = EnergySystemHandler()
esh.create_empty_energy_system('Untitled EnergySystem', '', 'Untitled Instance', 'Untitled Area',
esdlVersion=esdl_doc.get_esdl_version())
# TODO: discuss how to set active_es_id for the first time after a client connects
es_list = esh.get_energy_systems()
if es_list:
last_es = es_list[-1]
set_session('active_es_id', last_es.id)
else:
logger.error("No energy systems in esh list - Edwin and Ewoud discuss!!")
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, None, None) # run in a separate thread
@socketio.on('connect', namespace='/esdl')
def connect():
logger.info("Websocket connection established")
if 'id' in session:
logger.debug('- Old socketio id={}, new socketio id={}'.format(session['id'], request.sid))
else:
logger.debug('- Old socketio id={}, new socketio id={}'.format(None, request.sid))
session['id'] = request.sid
set_session('socketio_sid', request.sid)
# Client ID is used to retrieve session variables in handler_manager
# So this is a very important session variable!!
if 'client_id' in session:
logger.debug('- Client id: {}'.format(session['client_id']))
else:
logger.debug('- No client id in session')
if not valid_session():
send_alert("Session has timed out, please refresh")
def get_qau_information():
qau_info = dict()
qau_info['generic'] = ESDLQuantityAndUnits.get_qau_information()
qau_info['profile_type_enum_values'] = ESDLQuantityAndUnits.get_profile_type_enum_values()
qau_info['predefined_qau'] = esdl_config.esdl_config['predefined_quantity_and_units']
return qau_info
def get_carrier_color_dict():
me_settings = MapEditorSettings.get_instance()
me_ui_setting = me_settings.get_system_setting(MAPEDITOR_UI_SETTINGS)
if me_ui_setting:
if 'carrier_colors' in me_ui_setting:
return me_ui_setting['carrier_colors']
return None
@socketio.on('initialize', namespace='/esdl')
def browser_initialize():
user_email = get_session('user-email')
role = get_session('user-role')
view_modes = ViewModes.get_instance()
view_modes.initialize_user(user_email)
me_settings = MapEditorSettings.get_instance()
user_settings = me_settings.get_user_settings(user_email)
set_session('user_settings', user_settings)
logger.info('Send initial information to client')
emit('user_settings', user_settings)
emit('control_strategy_config', esdl_config.esdl_config['control_strategies'])
emit('carrier_color_dict', get_carrier_color_dict())
emit('wms_layer_list', wms_layers.get_layers())
emit('cap_pot_list', ESDLAsset.get_objects_list())
emit('qau_information', get_qau_information())
emit('esdl_services', esdl_services.get_user_services_list(user_email, role))
emit('user_info', {'email': user_email})
initialize_app()
# ---------------------------------------------------------------------------------------------------------------------
# Disconnect
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('disconnect', namespace='/esdl')
def on_disconnect():
logger.info('Client disconnected: {}'.format(request.sid))
# ---------------------------------------------------------------------------------------------------------------------
# Error logging
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on_error_default
def default_error_handler(e):
logger.error('Error in SocketIO handler: '+str(e))
import traceback
logger.error('Socket IO message: {}'.format(request.event["message"])) # "my error event"
logger.error('Socket IO arguments: {}'.format(request.event["args"]))
traceback.print_exc()
# ---------------------------------------------------------------------------------------------------------------------
# Start application
# ---------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
parse_esdl_config()
logger.info("Starting ESDL MapEditor application")
user_actions_logging.store_logging("System", "application start", "", "", "", {})
socketio.run(app, debug=settings.FLASK_DEBUG, host=settings.FLASK_SERVER_HOST, port=settings.FLASK_SERVER_PORT, use_reloader=True)
blockchain.py
#!/usr/bin/python3
import sys
sys.path.insert(0, 'Utilities')
sys.path.insert(0, 'Utilities/ProofOfStake')
import datetime
from main import ProofOfStakeMAIN
from tinydb import TinyDB, Query
from cryptography_testing import *
import hashlib
import time
import json
from urllib.parse import urlparse
from uuid import uuid1, uuid4
import requests as r
from urllib import error
import random
from passlib.hash import pbkdf2_sha256
import base64
from Wallets import Signatures
from multiprocessing import Process
# git add .
# git commit -m "Message"
# git push
algs = Algs()
ring_ct = Ring_CT()
decoy_transactions = Decoy_addresses()
DB = TinyDB('db_blockchain.json')
NODES = TinyDB('nodes.json')
wallet_bal = Check_Wallet_Balance()
signatures = Signatures()
class Blockchain:
""" the blockchain class """
def __init__(self):
self.nodes = []
if len(self.read_data(NODES)) > len(self.nodes):
self.nodes = self.read_data(NODES)
print(self.nodes)
else:
# NODES.insert(self.nodes)
# self.read_data(NODES)
# self.nodes = []
pass
self.unconfirmed_transactions = []
self.new_transactions = []
self.allnodes = None
self.chain = [] # stores the blockchain
# Checks to see if a chain is already present
self.old_chain = self.read_data(DataBase=DB)
if len(self.old_chain) > len(self.chain):
self.chain = self.old_chain
self.transactions = []
else:
self.transactions = ["How's our data?"]
# helps with block creation
self.create_block(proof=1, previous_hash="0",
forger='Network', timestamp='0')
def add_node_to_file(self):
""" writes the nodes to a file since tinydb is being a pain """
un_added_nodes = []
try:
    with open('nodes.txt', 'r') as file1:
        existing_nodes = file1.read()
except FileNotFoundError:
    existing_nodes = ''
for node in self.nodes:
    if node not in existing_nodes:
        un_added_nodes.append(f'{node}\n')
# append instead of overwriting, so nodes that were already stored are kept
with open('nodes.txt', 'a') as file1:
    file1.writelines(un_added_nodes)
def add_smartContract(self, senderprivatekey: str, senderviewkey: str, sendersendpublickey, receiver, compiledcontract):
""" This is used to add transactions so they can be verified """
unconfirmedTransaction = {'sender send publickey': sendersendpublickey, 'sender send privatekey': senderprivatekey, 'sender address': senderviewkey,
'receiver': receiver, 'amount': algs.fee, 'id': str(uuid1()), 'timestamp': time.time(), 'type': 'Contract', 'contract': compiledcontract}
verify = self.doubleSpendCheck(unconfirmedTransaction)
if verify == False:
self.unconfirmed_transactions.append(unconfirmedTransaction)
return unconfirmedTransaction
def to_JSON(self, data):
""" Converts to json """
return json.loads(json.dumps(data))
def add_data(self, data, DataBase):
""" This adds data to the database that is selected """
DataBase.truncate()
for item in data:
# formatted = {'node': item}
DataBase.insert(item)
return 'data has been added!!'
def add_node_to_file_tinydb(self, data, DataBase):
""" This adds data to the database that is selected """
DataBase.truncate()
for item in data:
formatted = {'node': item}
DataBase.insert(formatted)
return 'data has been added!!'
def read_data(self, DataBase):
""" Reads all the data in the selected database """
data = DataBase.all()
return data
def update_nodes(self, node):
""" Updates the list of nodes on one node to prevent loops when announcing new nodes on the network"""
self.nodes.append(node)
self.add_data(data=self.nodes, DataBase=NODES)
return None
def create_block(self, proof, previous_hash, forger, timestamp=None):
    """ Used to make a block: the pending transactions are verified, invalid transactions are removed from the list of
    transactions, and the list of transactions resets. When the block is added it is announced to all the nodes as a new block. """
    if timestamp is None:
        # a default of str(time.time()) in the signature would only be evaluated once, at definition time
        timestamp = str(time.time())
if len(self.chain) > 0:
valid = self.suspendAlgorithm(forger)
if valid == False:
self.new_transactions = []
miner_reward = algs.amount_change(self.chain)
transactionlist = []
if len(self.chain) > 0:
for transaction in self.unconfirmed_transactions:
# verify transactions and add transaction for the miner
valid = self.verify_transactions(transaction)
if valid == True:
self.transactions.append(transaction)
else:
self.removeTransaction(transaction)
else:
return 'Address cannot forge block due to it being in the receiving end of a transaction in the most recent 20 blocks'
self.add_miner_transaction('network', forger, miner_reward)
block = {
'index': len(self.chain) + 1,
'timestamp': str(timestamp),
'proof': proof,
'previous_hash': previous_hash,
'data': self.transactions
}
self.transactions = []
self.chain.append(block)
self.add_data(data=self.chain, DataBase=DB)
print(block)
if len(self.chain) > 1:
thread = Process(target=self.post_chain, args=(block, ))
thread.start()
return block
def get_prev_block(self):
""" get the previous block on the current blockchain """
return self.chain[-1]
def post_chain(self, block):
""" sends the new block to all nodes """
for nodes in self.nodes:
try:
node = nodes['node']
json = {"block": block}
url = r.post(f'http://{node}/insert_block', json=json)
url_status = url.status_code
print(f"http://{node}/insert_block {url_status}")
except Exception:
    # node could not be reached; skip it and try the remaining nodes
    pass
return 'chain is updated among all nodes'
def update_chain(self, block: dict):
""" Updates the chain and checks if the new block is valid """
lengthofunconfirmedtransactions = len(self.unconfirmed_transactions)
lengthofblocktransactions = len(block['data'])
if lengthofunconfirmedtransactions > lengthofblocktransactions:
new_chain = self.read_data(DB)
sizeCheck = self.recevBlockCheckSize(block=block)
new_chain.append(block)
if len(new_chain) > len(self.chain):
valid = self.is_chain_valid(chain=new_chain)
self.checkTransactions(block)
if valid == True and sizeCheck == True:
    self.chain = new_chain
    self.add_data(data=self.chain, DataBase=DB)  # persist the accepted chain; add_data needs the target database
return self.chain
else:
self.replace_chain()
return self.chain
else:
self.replace_chain()
self.unconfirmed_transactions = []
# self.add_data(data=self.unconfirmed_transactions, DataBase=UNconfirmed_transactions)
return self.chain
def proof_of_work(self, previous_proof):
""" This is used for mining, the proof of work algorithm """
new_proof = 1
check_proof = False
chain = self.chain
while check_proof is False:
if chain == self.chain:
hash_op = hashlib.sha256(str(new_proof**2 -
previous_proof**2).encode()).hexdigest()
work = algs.difficulty_increase(self.chain, self.nodes)
if hash_op[:len(work)] == algs.difficulty:
check_proof = True
else:
new_proof += 1
else:
check_proof = False
break
return new_proof
def add_false_transactions(self, transaction):
""" Adds fake transactions """
transactions = []
transactions.append(transaction)
decoy_transact = decoy_transactions.decoy_transactions(
transactions=transactions)
for decoy in decoy_transact:
transactions.append(decoy)
return transactions
def hash(self, block):
"""This is used to hash a block using sha256"""
encoded = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded).hexdigest()
def blockSizeCheck(self, transactions: list):
""" Checks the block size of blocks that haven't been created yet """
block = {
    'index': len(self.chain) + 1,
    'timestamp': str(time.time()),
    'proof': random.randint(200, 1000000000000),
    'previous_hash': self.hash(self.chain[-1]),  # the previous block is a dict, so hash it with self.hash
    'data': transactions + [transactions[-1]]  # wrap the last transaction in a list before concatenating
}
size_check = self.dynamicSizeLimit(block)
return size_check
def recevBlockCheckSize(self, block):
""" Checks block size of a newly made block """
sizeofblock = self.dynamicSizeLimit(block)
return sizeofblock
    def dynamicSizeLimit(self, Newblock):
        """ Checks the new block's size against twice the mean size of the newest 20 blocks """
        sizeofblock = 0
        if len(self.chain) >= 20:
            newest_blocks = self.chain[-20:]
        else:
            newest_blocks = self.chain
        for block in newest_blocks:
            sizeofblock = sys.getsizeof(block) + sizeofblock
        mean = sizeofblock / len(newest_blocks)
times2 = mean * 2
if sys.getsizeof(Newblock) <= times2:
return True
else:
return False
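    # Worked example (illustrative): if the 20 newest blocks occupy 24,000 bytes in total, the mean is
    # 1,200 bytes, so a new block passes only if sys.getsizeof(Newblock) <= 2,400 bytes.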
def is_chain_valid(self, chain, work=algs.count, limit=algs.difficulty):
"""Checks if the chain is valid with checking the previous hash and the proof"""
previous_block = chain[0]
block_index = 1
algs.difficulty_increase(chain, self.nodes)
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
            # recompute the same puzzle that proof_of_work solves (difference of squared proofs)
            hash_operation = hashlib.sha256(
                str(proof**2 - previous_proof**2).encode()).hexdigest()
            # prev_block = chain[block_index - 1]
            if block['index'] == previous_block['index']:
                return False
            # reject the block if its proof does not satisfy the difficulty target
            if hash_operation[:len(work)] != limit:
                return False
previous_block = block
block_index += 1
return True
def add_miner_transaction(self, sender: str, receiver: str, amount: float):
""" This is used to add miner transactions """
hashed_sender = str(pbkdf2_sha256.hash(sender))
hashed_sender = hashed_sender.replace('$pbkdf2-sha256$29000$', '')
hashed_receiver = str(pbkdf2_sha256.hash(receiver))
hashed_receiver = hashed_receiver.replace('$pbkdf2-sha256$29000$', '')
senders = ring_ct.make_ring_sign(
blockchain=self.chain, primary_address=hashed_sender)
receivers = ring_ct.make_ring_sign(
blockchain=self.chain, primary_address=hashed_receiver)
transactionID = str(uuid4())
timestamp = str(time.time())
transactionforsigning = {'sender': senders, 'amount': amount,
'receiver': receivers, 'id': transactionID, 'timestamp': timestamp}
transaction = transactionforsigning
signsender = transaction
minertransaction = {'sender': senders, 'amount': amount, 'receiver': receivers,
'sender signature': 'Network', 'id': transactionID, 'timestamp': timestamp, 'type': 'Transaction'}
self.transactions.append(minertransaction)
previous_block = self.get_prev_block()
return previous_block['index'] + 1
    def checkTransactions(self, block):
        """ Counts the transactions contained in the new block """
        numOfTransactionsInBlock = len(block['data'])
        return numOfTransactionsInBlock
def doubleSpendCheck(self, transaction):
""" checks for double spending in the block"""
verify = self.equals(transaction)
verify2 = self.timeStampCheck(transaction)
verify3 = self.duplicate_id_in_chain(transaction)
if verify == True or verify2 == True or verify3 == True:
return True
return False
    def equals(self, transaction):
        """ checks for repeated transaction ids among the unconfirmed transactions """
for uncontransaction in self.unconfirmed_transactions:
transactionID = transaction['id']
unconfirmedtransactionID = uncontransaction['id']
if transactionID == unconfirmedtransactionID:
return True
return False
def duplicate_id_in_chain(self, transaction):
""" Checks the transaction id in the whole blockchain """
unconfirmed_id = transaction['id']
for block in self.chain:
if block['index'] != 1:
for valid_transaction in block['data']:
print(valid_transaction)
if unconfirmed_id == valid_transaction['id']:
return True
return False
    def timeStampCheck(self, transaction):
        """ Checks for a repeated timestamp among the unconfirmed transactions """
for uncontransaction in self.unconfirmed_transactions:
unconfirmedtimestamp = uncontransaction['timestamp']
transactiontimestamp = transaction['timestamp']
if unconfirmedtimestamp == transactiontimestamp:
return True
return False
    def suspendAlgorithm(self, address):
        """ Checks whether the address appears as a receiver in the most recent 20 blocks; this prevents a single
        address from owning too much of the blockchain and helps defend against large-scale mining and 51% attacks """
blockIndex = self.chain[-1]['index']
blockIndex = blockIndex - 20
if blockIndex >= 0:
            # only inspect the most recent 20 blocks
            for block in self.chain[-20:]:
for data in block['data']:
for receiver in data['receiver']:
stealthAddress = receiver
verify = Check_Wallet_Balance().verify_keys(
publickey=stealthAddress, privatekey=address)
if verify == True:
return True
return False
if blockIndex < 0:
for block in self.chain[1:]:
for data in block['data']:
for receiver in data['receiver']:
stealthAddress = receiver
verify = Check_Wallet_Balance().verify_keys(
publickey=stealthAddress, privatekey=address)
if verify == True:
return True
return False
def broadcast_transaction(self, transaction):
""" sends list of unconfirmed transactions to all nodes """
for nodes in self.nodes:
node = nodes['node']
url = f'http://{node}/add_transaction/'
json = {'transaction': transaction}
r.post(url, json=json)
    def add_transaction(self, sendersignature: str, sender, receiver, amount: float, transactionID: str):
        """ Adds a transaction that already carries an id (one received from another node) so it can be
        verified; reusing the original id prevents loops in the network when transactions are re-broadcast """
        unconfirmedTransaction = {'sender send publickey': sender, 'signature': sendersignature, 'receiver': receiver,
                                  'amount': amount, 'id': transactionID, 'timestamp': time.time(), 'type': 'Transaction'}
        if self.doubleSpendCheck(unconfirmedTransaction) == False:
            self.unconfirmed_transactions.append(unconfirmedTransaction)
        return unconfirmedTransaction
def add_unconfirmed_transaction(self, sendersignature: str, sender, receiver, amount: float):
""" This is used to add transactions so they can be verified """
unconfirmedTransaction = {'sender send publickey': sender, 'signature':sendersignature,
'receiver': receiver, 'amount': amount, 'id': str(uuid4()), 'timestamp': time.time(), 'type': 'Transaction'}
verify = self.doubleSpendCheck(unconfirmedTransaction)
if verify == False:
self.unconfirmed_transactions.append(unconfirmedTransaction)
return unconfirmedTransaction
def verify_transactions(self, transaction):
""" verifies transactions on the blockchain """
        # unconfirmed transactions store the sender public key under 'sender send publickey'
        sender = transaction['sender send publickey']
receiver = transaction['receiver']
signature_of_sender = transaction['signature']
transaction_signature_is_valid = signatures.verify(public_key=sender, receiver=receiver, signature=signature_of_sender)
double_spend = self.doubleSpendCheck(transaction)
if double_spend == False and transaction_signature_is_valid == True:
return True
else:
return False
# P2p nodes
def removeTransaction(self, transaction):
""" Removes invalid transactions """
self.unconfirmed_transactions.remove(transaction)
def add_node(self, address):
""" This method adds a node to the network """
test = r.get(f'http://{address}/get_the_chain')
if test.status_code == 200:
new_node = address
self.nodes.append(new_node)
# self.nodes = set(self.nodes)
# self.nodes = list(self.nodes)
# self.add_node_to_file()
self.add_node_to_file_tinydb(self.nodes, NODES)
self.nodes = self.read_data(NODES)
return self.nodes[-1]
# try:
# if test.status_code == 200:
# for node in self.nodes:
# json = {'node':address}
# r.post(f'http://{node}/add_one_node/', json=json)
# json = {'node':node}
# r.post(f'http://{address}/add_one_node/', json=json)
# return self.nodes[-1]
# else:
# return {'message': 'invalid node address!'}
# except:
# return {'message': 'invalid node address!'}
"""
Get the chain and validity of the chain among the nodes
Find the blockchain with the greatest length and replace the other chains
"""
def replace_chain(self):
""" This replaces the chain and checks if it is valid """
if len(self.nodes) == 0:
return {'message': 'add some nodes to get the latest chain', 'blockchain': self.chain}
else:
longest_chain = None
print(self.nodes)
max_length = len(self.chain)
for nodes in self.nodes:
node = nodes['node']
try:
print(f'http://{node}/get_the_chain')
response = r.get(f'http://{node}/get_the_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['blockchain']
if length > max_length and self.is_chain_valid(chain=chain):
max_length = length
longest_chain = chain
if longest_chain != None:
if len(longest_chain) > len(self.chain):
self.chain = longest_chain
self.unconfirmed_transactions = []
self.add_data(
data=self.unconfirmed_transactions, DataBase=UNconfirmed_transactions)
return True
else:
longest_chain = self.chain
else:
longest_chain = self.chain
if response.status_code != 200:
longest_chain = self.chain
max_length = len(self.chain)
                except Exception:
longest_chain = self.chain
return False
|
ftp_client.py
|
import os
from datetime import datetime
from ftplib import FTP
from threading import Thread
from namenode.fs_tree import Directory, File
def create_replica(status, source_ip, dest_ip, path_from, path_to, auth_data):
try:
with FTP(source_ip, **auth_data) as ftp:
            response = ftp.sendcmd(f"REPL {path_from} {path_to} {dest_ip}").split(' ')[0]
            status[dest_ip] = response == '250'
except ConnectionRefusedError:
status[dest_ip] = False
class FTPClient:
def __init__(self, namenode, num_replicas, **auth_data):
self.num_replicas = num_replicas
self.namenode = namenode
self.datanodes = set()
self.auth_data = auth_data
def initialize(self):
disk_sizes = []
for datanode in self.datanodes:
try:
with FTP(datanode, **self.auth_data) as ftp:
ftp.voidcmd("RMDCONT /")
available_size = ftp.sendcmd("AVBL /").split(' ')[4]
disk_sizes.append(int(available_size))
except ConnectionRefusedError:
continue
self.namenode.fs_tree = Directory('/')
self.namenode.work_dir = self.namenode.fs_tree
result = sum(disk_sizes)
units = ['B', 'KB', 'MB', 'GB', 'TB']
i = 0
while result / 1000 > 2:
i += 1
result /= 1000
return f"Available size of the storage is {round(result, 2)} {units[i]}"
def get_file(self, file_path):
parent_dir, abs_path = self.namenode.work_dir.get_absolute_path(file_path)
if parent_dir is None:
return None, 'Incorrect path', None
file_name = file_path.split('/')[-1]
abs_path = os.path.join(str(parent_dir), file_name)
return parent_dir, abs_path, file_name
def get_dir(self, file_path):
parent_dir, abs_path = self.namenode.work_dir.get_absolute_path(file_path)
if parent_dir is None:
return None, 'Incorrect path', None
if str(parent_dir) == abs_path:
return parent_dir, str(parent_dir), str(parent_dir)
else:
file_name = file_path.split('/')[-1]
abs_path = os.path.join(str(parent_dir), file_name)
return parent_dir, abs_path, file_name
def create_file(self, file_path):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
if file_name in parent_dir:
return 'File already exists.'
file = parent_dir.add_file(file_name)
file.set_write_lock()
try:
selected_datanodes = set()
for datanode in self.datanodes:
if len(selected_datanodes) > self.num_replicas:
break
try:
with FTP(datanode, **self.auth_data) as ftp:
ftp.voidcmd(f"CRF {abs_path}")
selected_datanodes.add(datanode)
except ConnectionRefusedError:
continue
file.nodes = selected_datanodes
file.release_write_lock()
except Exception as e:
file.release_write_lock()
parent_dir.delete_file(file_name)
return 'File was not created due to internal error.'
return ''
def read_file(self, file_path, client_ip):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
if file_name not in parent_dir:
return 'File does not exist.'
file = parent_dir.children_files[file_name]
if not file.readable():
return 'File is being written. Reading cannot be performed.'
self.namenode.set_lock(client_ip, file, 0)
return {'ips': list(file.nodes), 'path': abs_path}
def write_file(self, file_path, client_ip, file_size):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
if file_name in parent_dir:
file = parent_dir.children_files[file_name]
if not file.writable():
return 'File is blocked by another process. Writing cannot be performed.'
else:
file = parent_dir.add_file(file_name)
self.namenode.set_lock(client_ip, file, 1)
selected_nodes = self._select_datanodes_for_write(file, file_size)
if len(selected_nodes) == 0:
self.namenode.release_lock(client_ip, abs_path)
            return 'There are no available nodes to store the file'
else:
file.new_nodes = selected_nodes
return {'ips': list(selected_nodes), 'path': abs_path}
def replicate_file(self, file_path, client_ip, node_ip):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
file = parent_dir.children_files[file_name]
self.namenode.release_lock(client_ip, abs_path)
file.set_write_lock()
if node_ip in file.nodes:
file.nodes.remove(node_ip)
self._delete_file_from_nodes(file)
file.nodes = file.new_nodes
left_nodes = file.nodes.copy()
storing_nodes = {node_ip}
left_nodes = left_nodes.difference(storing_nodes)
while len(left_nodes) > 0 and len(storing_nodes) < self.num_replicas:
statuses = {}
i = len(storing_nodes)
threads = []
for storing_node, left_node in zip(storing_nodes, left_nodes):
if i >= self.num_replicas:
break
args = (statuses, storing_node, left_node, str(file), str(file), self.auth_data)
thread = Thread(target=create_replica, args=args)
thread.start()
threads.append((left_node, storing_node, thread))
i += 1
for thread in threads:
thread[-1].join()
for (dest_node, status), (_, source_node, _) in zip(sorted(statuses.items()), sorted(threads)):
left_nodes.remove(dest_node)
if status:
storing_nodes.add(dest_node)
file.nodes = storing_nodes
file.release_write_lock()
return "File was replicated"
def _select_datanodes_for_write(self, file, file_size):
selected_nodes = set()
for node in self.datanodes:
try:
with FTP(node, **self.auth_data) as ftp:
old_file_size = file.size if node in file.nodes else 0
if int(ftp.sendcmd("AVBL /").split(' ')[4]) - old_file_size > file_size:
selected_nodes.add(node)
except ConnectionRefusedError:
continue
return selected_nodes
def _delete_file_from_nodes(self, file):
deleted_nodes = set()
for node in file.nodes:
try:
with FTP(node, **self.auth_data) as ftp:
ftp.voidcmd(f"DELE {file}")
deleted_nodes.add(node)
except ConnectionRefusedError:
continue
return deleted_nodes
def _copy_file_on_nodes(self, file, new_file, copy=True):
new_file_nodes = set()
for node in file.nodes:
try:
with FTP(node, **self.auth_data) as ftp:
if copy:
ftp.voidcmd(f"CP {file} {new_file}")
else:
ftp.voidcmd(f"MV {file} {new_file}")
new_file_nodes.add(node)
except ConnectionRefusedError:
continue
return new_file_nodes
def _get_relocation_info(self, file_path_from, dir_path_to):
file_parent_dir, file_abs_path, file_name = self.get_file(file_path_from)
dir_parent_dir, dir_abs_path, dir_name = self.get_dir(dir_path_to)
if dir_parent_dir is None:
return None, dir_abs_path
if file_parent_dir is None:
return None, file_abs_path
if file_name not in file_parent_dir:
return None, 'File does not exist.'
if str(dir_parent_dir) != dir_abs_path:
if dir_name not in dir_parent_dir:
return None, 'Directory does not exist.'
new_parent_dir = dir_parent_dir.children_directories[dir_name]
else:
new_parent_dir = dir_parent_dir
if file_name in new_parent_dir.children_files:
            return None, 'A file with the same name already exists in the directory.'
if not file_parent_dir.children_files[file_name].readable():
return None, 'File is being written. Copying cannot be performed.'
return file_name, file_parent_dir, new_parent_dir
def move_file(self, file_path_from, dir_path_to):
result = self._get_relocation_info(file_path_from, dir_path_to)
if result[0] is None:
return result[1]
file_name, file_parent_dir, new_parent_dir = result
file = file_parent_dir.delete_file(file_name)
try:
new_file_path = os.path.join(str(new_parent_dir), file_name)
new_file_nodes = self._copy_file_on_nodes(file, new_file_path, copy=False)
except Exception as e:
file_parent_dir.children_files[file_name] = file
return 'File was not moved due to internal error.'
new_parent_dir.children_files[file_name] = file
file.parent = new_parent_dir
file.nodes = new_file_nodes
return ''
def copy_file(self, file_path_from, dir_path_to):
result = self._get_relocation_info(file_path_from, dir_path_to)
if result[0] is None:
return result[1]
file_name, file_parent_dir, new_parent_dir = result
file_old = file_parent_dir.children_files[file_name]
file_old.set_read_lock()
file_new: File = new_parent_dir.add_file(file_name)
file_new.set_write_lock()
try:
new_file_path = os.path.join(str(new_parent_dir), file_name)
new_file_nodes = self._copy_file_on_nodes(file_old, new_file_path, copy=True)
except Exception as e:
file_new.release_write_lock()
file_old.release_read_lock()
new_parent_dir.delete_file(file_name)
            return 'File was not copied due to internal error.'
file_new.nodes = new_file_nodes
file_new.release_write_lock()
file_old.release_read_lock()
return ''
def remove_file(self, file_path):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
if file_name not in parent_dir:
return 'File does not exist.'
file = parent_dir.children_files[file_name]
if not file.writable():
return 'File is blocked by another process. Deleting cannot be performed.'
file.set_write_lock()
self._delete_file_from_nodes(file)
file.release_write_lock()
parent_dir.delete_file(file_name)
return 'File was deleted'
def get_info(self, file_path):
parent_dir, abs_path, file_name = self.get_file(file_path)
if parent_dir is None:
return abs_path
if file_name not in parent_dir:
return 'File does not exist.'
file = parent_dir.children_files[file_name]
if not file.readable():
return 'File is being written. Reading cannot be performed.'
file.set_read_lock()
result = 'File is not accessed'
for datanode in file.nodes:
try:
with FTP(datanode, **self.auth_data) as ftp:
ftp.voidcmd('TYPE I')
size = ftp.size(str(file))
date = ftp.sendcmd(f"MDTM {file}").split(' ')[1]
date = datetime.strptime(date, "%Y%m%d%H%M%S").isoformat(' ')
details = ftp.sendcmd(f"LIST {file}").split(' ', 1)[1]
units = ['B', 'KB', 'MB', 'GB', 'TB']
i = 0
while size / 1000 > 2:
i += 1
size /= 1000
result = f"Size of the file is {round(size, 2)} {units[i]}"
result += f'\nLast modified: {date}\n{details}'
break
except ConnectionRefusedError:
continue
file.release_read_lock()
return result
def create_directory(self, dir_path):
parent_dir, abs_path, dir_name = self.get_dir(dir_path)
if parent_dir is None:
return abs_path
if dir_name in parent_dir or abs_path == str(parent_dir):
            return 'Directory already exists.'
try:
new_dir = parent_dir.add_directory(dir_name)
new_dir.set_write_lock()
for datanode in self.datanodes:
try:
with FTP(datanode, **self.auth_data) as ftp:
ftp.voidcmd(f"MKD {abs_path}")
except ConnectionRefusedError:
continue
except Exception as e:
parent_dir.delete_directory(dir_name)
return 'Directory was not created due to internal error.'
finally:
new_dir.release_write_lock()
return ''
def open_directory(self, dir_path):
parent_dir, abs_path, dir_name = self.get_dir(dir_path)
if parent_dir is None:
return abs_path
self.namenode.work_dir.release_read_lock()
if str(parent_dir) != abs_path:
self.namenode.work_dir = parent_dir.children_directories[dir_name]
else:
self.namenode.work_dir = parent_dir
self.namenode.work_dir.set_read_lock()
return ''
def delete_directory(self, dir_path, force_delete=False):
        # a directory path must be resolved with get_dir so the root-directory check below works
        parent_dir, abs_path, dir_name = self.get_dir(dir_path)
if parent_dir is None:
return abs_path
if abs_path == str(parent_dir):
return 'You cannot delete root directory.', 0
if dir_name not in parent_dir:
return 'Directory does not exist.', 0
dir = parent_dir.children_directories[dir_name]
if not dir.writable():
return 'Directory is blocked by another process. Deleting cannot be performed.', 0
if (dir.children_directories or dir.children_files) and not force_delete:
            return 'Directory is not empty. Are you sure you want to delete it anyway? [Y/n]', 1
parent_dir.delete_directory(dir_name)
for datanode in self.datanodes:
try:
with FTP(datanode, **self.auth_data) as ftp:
ftp.voidcmd(f"RMTREE {abs_path}")
except ConnectionRefusedError:
continue
return 'Directory was deleted', 0
def read_directory(self, dir_path=None):
if dir_path is None:
parent_dir, abs_path, dir_name = self.get_dir(str(self.namenode.work_dir))
else:
parent_dir, abs_path, dir_name = self.get_dir(dir_path)
if parent_dir is None:
return abs_path
if str(parent_dir) != abs_path:
if dir_name not in parent_dir:
return 'Directory does not exist.'
dir = parent_dir.children_directories[dir_name]
else:
dir = parent_dir
files = [name for name, obj in dir.children_files.items()]
dirs = [name for name, obj in dir.children_directories.items()]
return {'files': files, 'dirs': dirs}
|
dafunc.py
|
# vim: set ts=4 sw=4 expandtab :
'''
Copyright (c) 2016, 2017 Tim Savannah All Rights Reserved.
    Licensed under the Lesser GNU Public License Version 3, LGPLv3. You should have received a copy of this with the source distribution as
LICENSE, otherwise it is available at https://github.com/kata198/func_timeout/LICENSE
'''
import copy
import inspect
import threading
import time
import types
import sys
from .exceptions import FunctionTimedOut
from .StoppableThread import StoppableThread
try:
from .py3_raise import raise_exception
except SyntaxError:
from .py2_raise import raise_exception
except ImportError:
from .py2_raise import raise_exception
from functools import wraps
__all__ = ('func_timeout', 'func_set_timeout')
def func_timeout(timeout, func, args=(), kwargs=None):
'''
func_timeout - Runs the given function for up to #timeout# seconds.
        Raises any exceptions #func# would raise and returns what #func# would return, unless the timeout is exceeded, in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives
'''
if not kwargs:
kwargs = {}
if not args:
args = ()
ret = []
exception = []
isStopped = False
def funcwrap(args2, kwargs2):
try:
ret.append( func(*args2, **kwargs2) )
except FunctionTimedOut:
# Don't print traceback to stderr if we time out
pass
except Exception as e:
exc_info = sys.exc_info()
if isStopped is False:
# Assemble the alternate traceback, excluding this function
# from the trace (by going to next frame)
                # Python 3 reads the traceback natively from __traceback__;
                # Python 2 has a different form of "raise"
e.__traceback__ = exc_info[2].tb_next
exception.append( e )
thread = StoppableThread(target=funcwrap, args=(args, kwargs))
thread.daemon = True
thread.start()
thread.join(timeout)
stopException = None
if thread.is_alive():
isStopped = True
class FunctionTimedOutTempType(FunctionTimedOut):
def __init__(self):
return FunctionTimedOut.__init__(self, '', timeout, func, args, kwargs)
FunctionTimedOutTemp = type('FunctionTimedOut' + str( hash( "%d_%d_%d_%d" %(id(timeout), id(func), id(args), id(kwargs))) ), FunctionTimedOutTempType.__bases__, dict(FunctionTimedOutTempType.__dict__))
stopException = FunctionTimedOutTemp
thread._stopThread(stopException)
thread.join(min(.1, timeout / 50.0))
raise FunctionTimedOut('', timeout, func, args, kwargs)
else:
# We can still cleanup the thread here..
# Still give a timeout... just... cuz..
thread.join(.5)
if exception:
raise_exception(exception)
if ret:
return ret[0]
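# --- Usage sketch (illustrative only, not part of the library API) ---
# "slow_square" and the timeouts below are hypothetical:
#
#   def slow_square(x, delay=1.0):
#       time.sleep(delay)
#       return x * x
#
#   func_timeout(1.5, slow_square, args=(4,), kwargs={'delay': 0.1})    # returns 16
#   try:
#       func_timeout(1.5, slow_square, args=(4,), kwargs={'delay': 5})  # raises FunctionTimedOut
#   except FunctionTimedOut:
#       pass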
def func_set_timeout(timeout, allowOverride=False):
'''
func_set_timeout - Decorator to run a function with a given/calculated timeout (max execution time).
        Optionally (if #allowOverride is True), adds a parameter, "forceTimeout", to the
function which, if provided, will override the default timeout for that invocation.
If #timeout is provided as a lambda/function, it will be called
prior to each invocation of the decorated function to calculate the timeout to be used
for that call, based on the arguments passed to the decorated function.
For example, you may have a "processData" function whose execution time
depends on the number of "data" elements, so you may want a million elements to have a
much higher timeout than seven elements.)
If #allowOverride is True AND a kwarg of "forceTimeout" is passed to the wrapped function, that timeout
will be used for that single call.
@param timeout <float OR lambda/function> -
**If float:**
Default number of seconds max to allow function to execute
before throwing FunctionTimedOut
            **If lambda/function:**
If a function/lambda is provided, it will be called for every
invocation of the decorated function (unless #allowOverride=True and "forceTimeout" was passed)
to determine the timeout to use based on the arguments to the decorated function.
The arguments as passed into the decorated function will be passed to this function.
They either must match exactly to what the decorated function has, OR
if you prefer to get the *args (list of ordered args) and **kwargs ( key : value keyword args form),
define your calculate function like:
def calculateTimeout(*args, **kwargs):
...
or lambda like:
calculateTimeout = lambda *args, **kwargs : ...
otherwise the args to your calculate function should match exactly the decorated function.
@param allowOverride <bool> Default False, if True adds a keyword argument to the decorated function,
"forceTimeout" which, if provided, will override the #timeout. If #timeout was provided as a lambda / function, it
will not be called.
        @throws FunctionTimedOut If the time allotted passes without the function returning naturally
@see func_timeout
'''
    # Try to be as efficient as possible... don't compare the args more than once
# Helps closure issue on some versions of python
defaultTimeout = copy.copy(timeout)
isTimeoutAFunction = bool( issubclass(timeout.__class__, (types.FunctionType, types.MethodType, types.LambdaType, types.BuiltinFunctionType, types.BuiltinMethodType) ) )
if not isTimeoutAFunction:
if not issubclass(timeout.__class__, (float, int)):
try:
timeout = float(timeout)
except:
raise ValueError('timeout argument must be a float/int for number of seconds, or a function/lambda which gets passed the function arguments and returns a calculated timeout (as float or int). Passed type: < %s > is not of any of these, and cannot be converted to a float.' %( timeout.__class__.__name__, ))
if not allowOverride and not isTimeoutAFunction:
# Only defaultTimeout provided. Simple function wrapper
def _function_decorator(func):
return wraps(func)(lambda *args, **kwargs : func_timeout(defaultTimeout, func, args=args, kwargs=kwargs))
# def _function_wrapper(*args, **kwargs):
# return func_timeout(defaultTimeout, func, args=args, kwargs=kwargs)
# return _function_wrapper
return _function_decorator
if not isTimeoutAFunction:
# allowOverride is True and timeout is not a function. Simple conditional on every call
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
if 'forceTimeout' in kwargs:
useTimeout = kwargs.pop('forceTimeout')
else:
useTimeout = defaultTimeout
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
# At this point, timeout IS known to be a function.
timeoutFunction = timeout
if allowOverride:
# Could use a lambda here... but want traceback to highlight the calculate function,
# and not the invoked function
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
if 'forceTimeout' in kwargs:
useTimeout = kwargs.pop('forceTimeout')
else:
useTimeout = timeoutFunction(*args, **kwargs)
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
# Cannot override, and calculate timeout function
def _function_decorator(func):
def _function_wrapper(*args, **kwargs):
useTimeout = timeoutFunction(*args, **kwargs)
return func_timeout(useTimeout, func, args=args, kwargs=kwargs)
return wraps(func)(_function_wrapper)
return _function_decorator
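# --- Usage sketch (illustrative only) ---
# "quick_task" and "process_data" are hypothetical; they demonstrate the decorator forms documented
# above (fixed timeout, calculated timeout, and the "forceTimeout" override):
#
#   @func_set_timeout(2.5)
#   def quick_task():
#       return 'done'
#
#   @func_set_timeout(lambda data: 0.01 * len(data) + 1, allowOverride=True)
#   def process_data(data):
#       return [d * 2 for d in data]
#
#   process_data(range(100))                    # timeout computed from the argument
#   process_data(range(100), forceTimeout=30)   # explicit per-call override; the lambda is not called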
# vim: set ts=4 sw=4 expandtab :
|
setup.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Miscellaneous I/O utility functions for setting up the render pipeline.
Abstracts over the setup that has to be done for various configurations of the
farm, namely between over a single node, LAN farm, and AWS farm. It additionally sets up
the appropriate flags to execute render.py. setup.py cannot be run standalone.
Attributes:
    bin_to_flags (dict[str, list[dict[str, _]]]): Map from binary name to corresponding flags.
FLAGS (absl.flags._flagvalues.FlagValues): Globally defined flags for render.py. Note that,
unlike all other apps, the FLAGS here do not directly relate to setup.py.
"""
import datetime
import json
import multiprocessing as mp
import os
import re
from shutil import which
import signal
import sys
import traceback
from pathlib import Path
from subprocess import Popen
from threading import Timer
from absl import flags, logging
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "util"))
import config
from network import Address, NetcatClient, get_os_type
from scripts.util.system_util import get_flags, image_type_paths, OSType, run_command
FLAGS = flags.FLAGS
flag_names = set()
child_pids = []
facebook360_dep_root = str(Path(os.path.abspath(__file__)).parents[2])
source_root = os.path.join(facebook360_dep_root, "source")
depth_est_src = os.path.join(source_root, "depth_estimation")
bin_to_flags = {
"TemporalBilateralFilter": get_flags(
os.path.join(depth_est_src, "TemporalBilateralFilter.cpp")
),
"ConvertToBinary": get_flags(
os.path.join(source_root, "mesh_stream", "ConvertToBinary.cpp")
),
"DerpCLI": get_flags(os.path.join(depth_est_src, "DerpCLI.cpp")),
"GenerateForegroundMasks": get_flags(
os.path.join(source_root, "render", "GenerateForegroundMasks.cpp")
),
"LayerDisparities": get_flags(os.path.join(depth_est_src, "LayerDisparities.cpp")),
"SimpleMeshRenderer": get_flags(
os.path.join(source_root, "render", "SimpleMeshRenderer.cpp")
),
"UpsampleDisparity": get_flags(
os.path.join(depth_est_src, "UpsampleDisparity.cpp")
),
}
class RepeatedTimer(object):
"""Executes a provided function at periodic intervals.
Attributes:
*args: Variable length argument list for the function to be repeatedly executed.
function (func): Arbitrary function to be repeatedly run.
interval (int): Number of seconds between consecutive runs of the function.
is_running (bool): Whether or not the function is currently running.
**kwargs: Arbitrary keyword arguments for the function to be repeatedly executed.
"""
def __init__(self, interval, function, *args, **kwargs):
"""Sets up a function to be repeatedly run in the background at fixed intervals.
Args:
            interval (int): Number of seconds between consecutive runs of the function.
function (func): Arbitrary function to be repeatedly run.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
"""
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
"""Runs the function asynchronously."""
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
"""Starts the repeated execution asynchronously."""
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
"""Stops the repeated execution."""
self._timer.cancel()
self.is_running = False
# End of class RepeatedTimer definition
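# Usage sketch (illustrative; "log_heartbeat", "run_long_stage" and the 30-second interval are assumptions):
#
#   def log_heartbeat():
#       logging.info("render pipeline alive")
#
#   rt = RepeatedTimer(30, log_heartbeat)   # scheduling starts on construction; first call after ~30s
#   try:
#       run_long_stage()                    # hypothetical long-running pipeline stage
#   finally:
#       rt.stop()                           # always cancel the background timer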
def init_facebook360_dep(gflags):
"""Sets up the environment with expected values and handlers.
Args:
gflags (absl.flags._flagvalues.FlagValues): Globally defined flags.
"""
setup_termination_handlers()
set_glog_env(gflags)
setup_logging_handler(gflags.log_dir)
# Glog wrapper doesn't see GLOG environment variables, so we need to set them manually
# GLOG environment variables override local flags
def set_glog_env(gflags):
"""Sets up GLOG environment variables.
Args:
gflags (absl.flags._flagvalues.FlagValues): Globally defined flags.
"""
gflags.alsologtostderr = "1"
gflags.stderrthreshold = "0"
output_address = Address(FLAGS.output_root)
if output_address.protocol != "s3":
gflags.log_dir = os.path.join(FLAGS.output_root, "logs")
# Create logging directory and setup logging handler
def setup_logging_handler(log_dir):
"""Sets up logging.
Args:
log_dir (str): Path to directory where logs should be saved.
"""
if log_dir:
os.makedirs(log_dir, exist_ok=True)
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
logging.get_absl_handler().use_absl_log_file(program_name, log_dir)
def terminate_handler():
"""Cleans workers before terminating the program."""
cleanup_workers()
logging.error("".join(traceback.format_stack()))
sys.exit(0)
def sigterm_handler(signal, frame):
"""Handler for any catchable signal that terminates the program.
Args:
signal (signal.signal): Type of signal.
frame (frame): Stack frame.
"""
logging.error(f"Signal handler called with signal {signal}")
terminate_handler()
def setup_termination_handlers(sigterm_handler=sigterm_handler):
"""Sets up a handler for all termination signals.
Args:
sigterm_handler (func: (signal.signal, frame) -> void, optional): Function for handling
termination signals.
"""
[
signal.signal(s, sigterm_handler)
for s in [
#signal.SIGHUP, # terminate process: terminal line hangup
signal.SIGINT, # terminate process: interrupt program
#signal.SIGQUIT, # create core image: quit program
signal.SIGILL, # create core image: illegal instruction
#signal.SIGTRAP, # create core image: trace trap
signal.SIGFPE, # create core image: floating-point exception
#signal.SIGBUS, # create core image: bus error
signal.SIGSEGV, # create core image: segmentation violation
#signal.SIGSYS, # create core image: non-existent system call invoked
#signal.SIGPIPE, # terminate process: write on a pipe with no reader
signal.SIGTERM, # terminate process: software termination signal
]
]
def define_flags():
"""Defines abseil flags for render."""
for bin in bin_to_flags:
for flag in bin_to_flags[bin]:
if flag["name"] in flag_names:
continue
cmd = f"flags.DEFINE_{flag['type']}('{flag['name']}', {flag['default']}, '{flag['descr']}')"
exec(cmd)
flag_names.add(flag["name"])
flags.DEFINE_integer("chunk_size", 1, "chunk size of work distribution to workers")
flags.DEFINE_string("cloud", "", "cloud compute service (currently supports: aws)")
flags.DEFINE_string("color_type", "color", "type of color to render")
flags.DEFINE_string("disparity_type", "disparity", "type of disparity to render")
flags.DEFINE_boolean(
"do_temporal_filter", True, "whether to run temporal filtering"
)
flags.DEFINE_boolean(
"do_temporal_masking",
False,
"use foreground masks when doing temporal filtering",
)
flags.DEFINE_boolean(
"force_recompute",
False,
"whether to recompute previously performed pipeline stages",
)
flags.DEFINE_string("master", config.LOCALHOST, "ip address of master")
flags.DEFINE_string(
"password", "", "password for NFS (only relevant for SMB mounts)"
)
    flags.DEFINE_boolean("run_background_depth_estimate", True, "run background depth estimation and generate background disparity")
flags.DEFINE_boolean("run_convert_to_binary", True, "run binary conversion")
flags.DEFINE_boolean("run_depth_estimation", True, "run depth estimation")
flags.DEFINE_boolean("run_fusion", True, "run fusion")
flags.DEFINE_boolean(
"run_generate_foreground_masks", True, "run foreground mask generation"
)
flags.DEFINE_boolean("run_precompute_resizes", True, "run resizing")
flags.DEFINE_boolean(
"run_precompute_resizes_foreground", True, "run foreground mask resizing"
)
flags.DEFINE_boolean("run_simple_mesh_renderer", True, "run simple mesh renderer")
flags.DEFINE_boolean("skip_setup", False, "assume workers have already been set up")
flags.DEFINE_string(
"username", "", "username for NFS (only relevant for SMB mounts)"
)
flags.DEFINE_string("workers", config.LOCALHOST, "ip addresses of workers")
flag_names.update(
{
"chunk_size",
"cloud",
"color_type",
"disparity_type",
"do_temporal_filter",
"do_temporal_masking",
"force_recompute",
"master",
"password",
"run_generate_foreground_masks",
"run_precompute_resizes",
"run_precompute_resizes_foreground",
"run_depth_estimation",
"run_convert_to_binary",
"run_fusion",
"run_simple_mesh_renderer",
"skip_setup",
"username",
"workers",
}
)
def log_flags():
"""Prints formatted list of flags and their values."""
padding = max(len(flag_name) for flag_name in flag_names)
sorted_flags = sorted(flag_names)
for flag_name in sorted_flags:
        logging.info(f"{flag_name.ljust(padding)} = {FLAGS[flag_name].value}")
def docker_mounts(input_root, host_to_docker_path, username, password):
"""Constructs a list of the relevant commands to mount the external paths.
The mounts are performed as commands if on a LAN and are volume mounts if
for a single node.
Args:
input_root (str): Path to the root of inputs.
host_to_docker_path (dict[str, str]): Map of local paths to path inside container.
username (str): Username for SMB drive. Can be blank if no username is used
for the drive or if rendering locally.
password (str): Password for SMB drive. Can be blank if no password is used
for the drive or if rendering locally.
Returns:
list[str]: List of Docker mount commands
"""
if Address(input_root).protocol == "smb":
mount_creds = f"mount -t cifs -o username={username},password={password} "
mounts = [
f"{mount_creds} //{Address(external_path).ip_path} {docker_path}"
for external_path, docker_path in host_to_docker_path.items()
]
else:
mounts = [
f"--mount type=bind,source={external_path},target={docker_path} \\"
for external_path, docker_path in host_to_docker_path.items()
]
return mounts
def docker_run_cmd(ip, docker_img=config.DOCKER_IMAGE):
"""Constructs the command to run the Docker container. The container will map all
the desired endpoints to the canonical structure internally.
Args:
ip (str): IP of the master.
docker_img (str, optional): Name of the docker image.
Returns:
str: Command to run the configured Docker container.
"""
master = config.DOCKER_LOCALHOST if ip == config.LOCALHOST else FLAGS.master
host_to_docker_path = {
FLAGS.input_root: config.DOCKER_INPUT_ROOT,
FLAGS.color: os.path.join(config.DOCKER_INPUT_ROOT, image_type_paths["color"]),
FLAGS.background_disp: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["background_disp"]
),
FLAGS.background_color: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["background_color"]
),
FLAGS.foreground_masks: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["foreground_masks"]
),
FLAGS.output_root: config.DOCKER_OUTPUT_ROOT,
}
mounts = docker_mounts(
FLAGS.input_root, host_to_docker_path, FLAGS.username, FLAGS.password
)
if Address(FLAGS.input_root).protocol == "smb":
return f"""docker run --privileged \
-t -d {docker_img}:latest \
/bin/bash -c "mkdir {config.DOCKER_INPUT_ROOT} && mkdir {config.DOCKER_OUTPUT_ROOT} && {" && ".join(
mounts)} && python3 {config.DOCKER_SCRIPTS_ROOT}/render/worker.py --master {master}" """
else:
mount_cmds = "\n".join(mounts)
return f"""docker run {mount_cmds} \
-t -d {docker_img}:latest \
python3 {config.DOCKER_SCRIPTS_ROOT}/render/worker.py --master {master}"""
def configure_worker_daemon(ip):
"""Configures the Docker daemon to accept HTTP connections for using the local registry.
Args:
ip (str): IP of the worker.
"""
os_type = get_os_type(ip)
os_paths = {
OSType.MAC: "~/.docker/",
OSType.WINDOWS: "$env:userprofile\.docker",
OSType.LINUX: "/etc/docker/",
}
os_restarts = {
        OSType.MAC: [
            """osascript -e 'quit app "Docker"'""",
            "open -a Docker",
            "until docker ps; do sleep 2; done",
],
OSType.WINDOWS: [
"net stop docker",
"net stop com.docker.service",
'taskkill /IM "dockerd.exe" /F',
'taskkill /IM "Docker for Windows.exe" /F',
"net start docker",
"net start com.docker.service",
'& "c:\\Program Files\\Docker\\Docker\\Docker for Windows.exe"',
"while (!(docker ps)) { sleep 2 };",
],
OSType.LINUX: ["systemctl restart docker"],
}
registry = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}"
daemon_json = os.path.join(os_paths[os_type], config.DOCKER_DAEMON_JSON)
nc = NetcatClient(ip, config.NETCAT_PORT)
results = nc.run([f"cat {daemon_json}"])
try:
relevant_part = r"\{[^\}]*\}" # extracts section inside braces
m = re.search(relevant_part, results)
daemon_config = json.loads(m.group(0))
except Exception:
daemon_config = {}
if "insecure-registries" in daemon_config:
if registry in daemon_config["insecure-registries"]:
return
else:
daemon_config["insecure-registries"] = []
daemon_config["insecure-registries"].append(registry)
new_daemon_config = json.dumps(daemon_config)
configure_cmds = [f"echo '{new_daemon_config}' > {daemon_json}"]
configure_cmds += os_restarts[os_type]
nc.run(configure_cmds)
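# Illustrative result (values are assumptions): after configure_worker_daemon() runs, the worker's
# Docker daemon.json contains an entry like
#   {"insecure-registries": ["<master-ip>:<DOCKER_REGISTRY_PORT>"]}
# so the worker can pull images from the master's plain-HTTP local registry.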
def spawn_worker(ip, num_containers, run_async):
"""Creates worker container(s) on the desired IP.
Args:
ip (str): IP of the machine to run the worker container.
num_containers (int): Number of containers to be run.
run_async (bool): Whether the spawning should happen synchronously or not.
"""
print(f"Spawning worker on: {ip}...")
remote_image = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}/{config.DOCKER_IMAGE}"
configure_worker_daemon(ip)
cmds = ["docker stop $(docker ps -a -q)", f"docker pull {remote_image}"]
cmds += [docker_run_cmd(ip, remote_image)] * num_containers
nc = NetcatClient(ip, config.NETCAT_PORT)
os_type = get_os_type(ip)
if os_type == OSType.LINUX:
nc.run_script("setup_gpu.sh")
if run_async:
nc.run_async(cmds)
else:
nc.run(cmds)
print(f"Completed setup of {ip}!")
def spawn_worker_local(replica):
"""Starts a worker locally.
Args:
replica (int): Replica ID of the worker being spawned.
"""
# We use Popen instead of run_command, since worker process is backgrounded
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S.%f")
worker_logfile = os.path.join(
FLAGS.log_dir, f"Worker-{timestamp}-{replica}"
)
os.makedirs(os.path.dirname(worker_logfile), exist_ok=True)
with open(worker_logfile, "w") as fp:
proc = Popen(
[
"python",
f"scripts/winrender/worker.py",
f"--master={FLAGS.master}",
],
stdout=fp,
stderr=fp,
)
global child_pids
child_pids.append(proc.pid)
def setup_master(base_params):
"""Sets up the master node for rendering.
Args:
base_params (dict[str, _]): Map of all the FLAGS defined in render.py.
"""
protocol = Address(base_params["input_root"]).protocol
try:
if protocol == "s3":
run_command("sudo service rabbitmq-server start")
else:
run_command("service rabbitmq-server start")
except Exception:
runtime = "nvidia" if which("nvidia-docker") else ""
cmd = f"""docker run --runtime={runtime} -p 5672:5672 -p 15672:15672 \
-d {config.DOCKER_IMAGE}:latest rabbitmq-server start"""
run_command(cmd)
def setup_workers(base_params):
"""Sets up the worker nodes for rendering.
Args:
base_params (dict[str, _]): Map of all the FLAGS defined in render.py.
"""
processes = []
for worker in FLAGS.workers.split(","):
if ":" in worker:
ip, num_replicas = worker.split(":")
num_replicas = int(num_replicas)
else:
ip = worker
num_replicas = 1
if ip == config.LOCALHOST:
for replica in range(num_replicas):
spawn_worker_local(replica)
else:
processes.append(
mp.Process(target=spawn_worker, args=(ip, num_replicas, False))
)
for process in processes:
process.start()
for process in processes:
process.join()
def cleanup_workers():
"""Destroys the worker process if running locally."""
for child_pid in child_pids:
os.kill(child_pid, signal.SIGTERM)
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError, Millisatoshi
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, TIMEOUT,
account_balance, first_channel_id, closing_fee, TEST_NETWORK,
scriptpubkey_addr, calc_lease_fee, EXPERIMENTAL_FEATURES,
check_utxos_channel, anchor_expected, check_coin_moves,
check_balance_snaps
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@pytest.mark.developer("Too slow without --dev-bitcoind-poll")
def test_closing_simple(node_factory, bitcoind, chainparams):
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
chan = l1.get_channel_scid(l2)
channel_id = first_channel_id(l1, l2)
fee = closing_fee(3750, 2) if not chainparams['elements'] else 4263
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['deposit'], None, None)],
}
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l2.stop()
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
out = subprocess.check_output(['cli/lightning-cli',
'--network={}'.format(TEST_NETWORK),
'--lightning-dir={}'
.format(l1.daemon.lightning_dir),
'close',
l2.info['id'],
'5']).decode('utf-8').splitlines()
assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
assert out[1] == '# Timed out, forcing close.'
assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fundchannel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@pytest.mark.slow_test
def test_closing_torture(node_factory, executor, bitcoind):
# We set up a fully-connected mesh of N nodes, then try
# closing them all at once.
amount = 10**6
num_nodes = 10 # => 45 channels (36 seconds on my laptop)
if node_factory.valgrind:
num_nodes -= 4 # => 15 (135 seconds)
nodes = node_factory.get_nodes(num_nodes)
# Make sure bitcoind has plenty of utxos
bitcoind.generate_block(num_nodes)
# Give them all plenty of UTXOs, make sure they see them
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
addr = nodes[i].rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txs = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
# Make sure they're all in, then lock them in.
bitcoind.generate_block(1, wait_for_mempool=txs)
# Wait for them all to be CHANNELD_NORMAL
for n in nodes:
wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
# Start closers: can take a long time under valgrind!
futures = []
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
# Wait for close to finish
close_txs = set()
for f in futures:
# If one side completes closing, we'll get an error here 'Peer has no active channel'
try:
close_txs.add(f.result(TIMEOUT)['txid'])
except RpcError as err:
assert err.error['message'] == 'Peer has no active channel'
# Should have one close for each open.
assert len(close_txs) == len(txs)
# Get closes confirmed
bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
# And make sure they hangup.
for n in nodes:
wait_for(lambda: n.rpc.listpeers()['peers'] == [])
@unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/11000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
balance = [False, True]
num_peers = len(feerates) * len(balance)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for b in balance:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
            p.balance = b
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.balance:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'may_reconnect': True},
{'may_reconnect': True}])
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(l2.info['id'])
l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
# Now verify that the closing tx is in the mempool.
bitcoind.generate_block(6, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
for n in [l1, l2]:
# Ensure we actually got a mutual close.
n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12, _ = l1.fundchannel(l2, 10**6)
chan13, _ = l1.fundchannel(l3, 10**6)
chan14, _ = l1.fundchannel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
bitcoind.generate_block(5)
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
    # l1 can't spend the output to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
        # Check that the other output's address is addr
assert addr == scriptpubkey_addr(bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey'])
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
def feerate_for(target, minimum=0, maximum=10000000):
"""Binary search to find feerate"""
assert minimum != maximum
mid = (minimum + maximum) // 2
mid_fee = closing_fee(mid, 1)
if mid_fee > target:
return feerate_for(target, minimum, mid)
elif mid_fee < target:
return feerate_for(target, mid, maximum)
else:
return mid
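    # feerate_for() binary-searches for a feerate whose mutual-close fee (per
    # the closing_fee() helper used above, defined elsewhere in this suite)
    # hits the exact target, so each side's opening offer is a known number.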
orate = feerate_for(21000) # closing fee negotiation starts at 21000
prate = feerate_for(20000) # closing fee negotiation starts at 20000
opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
{'feerates': (prate, prate, prate, prate)}])
opener_id = opener.info['id']
peer_id = peer.info['id']
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
if opts['close_initiated_by'] == 'opener':
opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
else:
assert opts['close_initiated_by'] == 'peer'
peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])
# Get the proclaimed closing fee from the two nodes' statuses
status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")
# [fee_from_opener_status, fee_from_peer_status]
fees_from_status = [None, None]
def get_fee_from_status(node, peer_id, i):
nonlocal fees_from_status
peer = only_one(node.rpc.listpeers(peer_id)['peers'])
channel = only_one(peer['channels'])
status = channel['status'][0]
m = status_agreed_regex.search(status)
if not m:
return False
fees_from_status[i] = int(m.group(1))
return True
wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
wait_for(lambda: get_fee_from_status(peer, opener_id, 1))
assert opts['expected_close_fee'] == fees_from_status[0]
assert opts['expected_close_fee'] == fees_from_status[1]
# Get the closing transaction from the bitcoind mempool and get its fee
mempool = None
mempool_tx_ids = None
def get_mempool_when_size_1():
nonlocal mempool, mempool_tx_ids
mempool = bitcoind.rpc.getrawmempool(True)
mempool_tx_ids = list(mempool.keys())
return len(mempool_tx_ids) == 1
wait_for(get_mempool_when_size_1)
close_tx_id = mempool_tx_ids[0]
fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)
assert opts['expected_close_fee'] == fee_mempool
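# The test_closing_negotiation_step_* tests below drive closing_negotiation_step()
# with different fee_negotiation_step settings, checking both the fee each node
# reports in its channel status and the fee actually paid by the mutual-close
# transaction found in the mempool.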
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 30%"""
opts = {}
opts['fee_negotiation_step'] = '30%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20537
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20233
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 100%"""
opts = {}
opts['fee_negotiation_step'] = '100%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20001
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
# The close fee of 20499 looks strange in this case - one would expect
# to have a number close to 21000. This is because
# * the range is initially set to [20000 (peer), 21000 (opener)]
# * the opener is always first to propose, he uses 50% step, so he proposes 20500
# * the range is narrowed to [20001, 20499] and the peer proposes 20499
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 1sat"""
opts = {}
opts['fee_negotiation_step'] = '1'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20989
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20010
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 700sat"""
opts = {}
opts['fee_negotiation_step'] = '700'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20151
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # We suppress each one after the first commit; the HTLC gets added but not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'dev-disable-commit-after': 1,
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'dev-disable-commit-after': 1,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
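    # dev_sign_last_tx hands us a signed copy of the current (soon-to-be-revoked)
    # commitment; broadcasting it further down simulates l1 cheating.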
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
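    # Remember the log cursor: we rewind logsearch_start to this point before
    # each search below, so the two penalty broadcasts can appear in any order.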
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
        # The first needle would match, but since we don't have a direct output
        # for l2 it wouldn't produce an owned output, hence it is left commented out:
# r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# l1 loses all of their channel balance to the peer, as penalties
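    # The expected_* maps below are in the format check_utxos_channel() consumes:
    # each key is a tx placeholder ('A', 'B', ...) and each entry appears to be
    # (account, credit tags, debit tags or None, placeholder of the spending tx).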
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
}
# l2 sweeps all of l1's closing outputs
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
    # expected_2 reuses a subset of the txid tags resolved while checking expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'dev-disable-commit-after': 3,
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'dev-disable-commit-after': 3,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('dev-disable-commit-after: disabling')
assert not l2.daemon.is_in_log('dev-disable-commit-after: disabling')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# l1 loses all of their channel balance to the peer, as penalties
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
}
# l2 sweeps all of l1's closing outputs
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
    # expected_2 reuses a subset of the txid tags resolved while checking expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_falls_behind(node_factory, bitcoind):
'''
If our peer falls too far behind/doesn't send us an update for
their blockheight, the lessor fails the channel
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# sink the funding transaction
bitcoind.generate_block(1)
# stop l1
l1.stop()
# advance blockchain 1008 blocks, the lessor should drop to chain
bitcoind.generate_block(1008)
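    # (1008 blocks is roughly a week at mainnet pacing; presumably the lease
    # code gives up on a peer that far behind on blockheight updates.)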
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_log('Offline peer is too far behind, terminating')
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
@pytest.mark.developer("requres 'dev-queryrates'")
@pytest.mark.slow_test
def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'plugin': coin_mvt_plugin}
l1, l2, = node_factory.get_nodes(2, opts=opts)
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
# l1 leases a channel from l2
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
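    # Rough sketch of what calc_lease_fee() (a test helper) should add up to
    # with the options above; not the authoritative formula:
    #   lease fee ~= lease-fee-base-msat
    #              + amount * lease-fee-basis / 10_000  (basis points on the lease)
    #              + a funding-weight component charged at `feerate`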
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
channel_id = first_channel_id(l1, l2)
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
# l2 attempts to close a channel that it leased, should fail
with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
l2.rpc.close(l1.get_channel_scid(l2))
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115')
# We need to give l1-l2 time to update their blockheights
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(32)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
# l1<->l2 mutual close should work
chan = l1.get_channel_scid(l2)
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.rpc.close(chan)
l2.daemon.wait_for_log('State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE')
bitcoind.generate_block(2)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
l2.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
channel_mvts_1 = [
{'type': 'chain_mvt', 'credit': 506432000, 'debit': 0, 'tags': ['channel_open', 'opener', 'leased']},
{'type': 'channel_mvt', 'credit': 0, 'debit': 6432000, 'tags': ['lease_fee'], 'fees': '0msat'},
{'type': 'channel_mvt', 'credit': 0, 'debit': 10000, 'tags': ['invoice'], 'fees': '0msat'},
{'type': 'chain_mvt', 'credit': 0, 'debit': 499990000, 'tags': ['channel_close']},
]
channel_mvts_2 = [
{'type': 'chain_mvt', 'credit': 500000000, 'debit': 0, 'tags': ['channel_open', 'leased']},
{'type': 'channel_mvt', 'credit': 6432000, 'debit': 0, 'tags': ['lease_fee'], 'fees': '0msat'},
{'type': 'channel_mvt', 'credit': 10000, 'debit': 0, 'tags': ['invoice'], 'fees': '0msat'},
{'type': 'chain_mvt', 'credit': 0, 'debit': 506442000, 'tags': ['channel_close']},
]
check_coin_moves(l1, channel_id, channel_mvts_1, chainparams)
check_coin_moves(l2, channel_id, channel_mvts_2, chainparams)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_unilat_closes(node_factory, bitcoind):
'''
Check that channel leases work
l1-l2: l1 leases funds from l2; l1 goes to chain unilaterally
l2-l3: l2 leases funds from l3; l3 goes to chain unilaterally
'''
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'funder-lease-requests-only': False}
l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
# Allow l2 some warnings
l2.allow_warning = True
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l3.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# l2 leases a channel from l3
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
rates = l2.rpc.dev_queryrates(l3.info['id'], amount, amount)
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers']) == 0)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.rpc.fundchannel(l3.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate), minconf=0,
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
l3.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l3.rpc.listchannels(l3.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
inv = l2.rpc.invoice(10**4, '3', 'no_3')
l3.rpc.pay(inv['bolt11'])
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110')
l2.stop()
# unilateral close channels l1<->l2 & l3<->l2
l1.rpc.close(l2.info['id'], 1)
l3.rpc.close(l2.info['id'], 1, force_lease_closed=True)
    # Wait until to_self_delay expires; l1 should claim its to_local output back
bitcoind.generate_block(10, wait_for_mempool=2)
l1.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
bitcoind.generate_block(1, wait_for_mempool=1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal OUR_DELAYED_RETURN_TO_WALLET')
assert len(l1.rpc.listfunds()['outputs']) == 2
l2.start()
search_start = l2.daemon.logsearch_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 40.*')
utxo1 = re.match('.* adding utxo to watch (.*), csv .*', log).group(1)
l2.daemon.logsearch_start = search_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 1')
utxo3 = re.match('.* adding utxo to watch (.*), csv 1', log).group(1)
# we *shouldn't* be able to spend it, there's a lock on it
with pytest.raises(RpcError, match='UTXO .* is csv locked'):
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# we *can* spend the 1csv lock one
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo3])
    # This can time out, so do it in four easy stages.
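    # (4032 blocks is presumably the lease lock period; once they are mined,
    # the csv-locked utxo from above becomes spendable again.)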
for i in range(4):
bitcoind.generate_block(4032 // 4)
sync_blockheight(bitcoind, [l2, l3])
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# l3 cleans up their to-self after their lease expires
assert l3.daemon.is_in_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessee can recover funds if lessor cheats
'''
balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_warning': True,
'plugin': balance_snaps},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True,
'plugin': balance_snaps}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
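    # Snapshotting and later restoring the sqlite db simulates l2 coming back
    # from a stale backup: it will unknowingly broadcast a revoked (cheating)
    # state, which l1 can then penalize.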
l2.start(wait_for_bitcoind_sync=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l2])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l1.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l1 while l1 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l1.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l1.start()
sync_blockheight(bitcoind, [l1])
l1.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l2 sees that l1 has spent their coins!
l2.daemon.wait_for_log('Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by')
@unittest.skipIf(TEST_NETWORK != 'regtest', "elementsd doesn't yet support PSBT features we need")
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessor can recover funds if lessee cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-msat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l1
l1.stop()
l1_db_path = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l1_db_path_bak = os.path.join(l1.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l1_db_path, l1_db_path_bak)
l1.start()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l1])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l1's database
l1.stop()
l2.stop()
copyfile(l1_db_path_bak, l1_db_path)
# start l1 and force close channel with l2 while l2 is still offline
l1.start()
sync_blockheight(bitcoind, [l1])
l1.rpc.close(l2.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l2.start()
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
    # l1 sees that l2 has spent their coins!
l1.daemon.wait_for_logs(['Grinding for to_remote',
'Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by'])
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts their htlc fulfill tx
l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
l1, l2, l3, l4 = node_factory.line_graph(4,
opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None},
{'plugin': [coin_mvt_plugin, balance_snaps],
'disable-mpp': None,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'plugin': [coin_mvt_plugin, balance_snaps],
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'dev-no-reconnect': None,
'may_reconnect': True}],
wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
    # push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
l2.rpc.waitsendpay(inv['payment_hash'])
# now we send one 'sticky' htlc: l4->l1
amt = 10**8 // 2
sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
    # l2 moves ahead with its close of the (now offline) l3 channel
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX'])
l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
bitcoind.generate_block(1)
l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('external', ['penalized'], None, None)],
'C': [('external', ['penalized'], None, None)],
}
expected_3 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'E')],
'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
'D': [('wallet', ['deposit'], None, None)],
'E': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_3['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
expected_3['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
if not chainparams['elements']:
# Also check snapshots
expected_bals_2 = [
{'blockheight': 101, 'accounts': [{'balance': '0msat'}]},
{'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
# There's a duplicate because we stop and restart l2 twice
# (both times at block 108)
{'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
]
check_balance_snaps(l2, expected_bals_2)
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
^---> l5
l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
both of which have a delay. l2 goes ahead and 'steals back' their
output + the htlc they fulfill
l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4, l5 = node_factory.get_nodes(
5,
opts=[
{
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'dev-no-reconnect': None,
}, {
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
'allow_broken_log': True,
}
]
)
node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
node_factory.join_nodes([l3, l5], wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
    # push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
# now we send two 'sticky' htlcs, l1->l5 + l4->l1
amt = 10**8 // 2
sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
l1.rpc.sendpay(route, sticky_inv_1['payment_hash'], payment_secret=sticky_inv_1['payment_secret'])
l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.lightning_dir, chainparams['name'], 'lightningd.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2, now back a bit. force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
    # l2 moves ahead with its close of the (now offline) l3 channel
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # after 5 blocks, l2 reclaims both the HTLC-success tx's delayed output and their unilateral's delayed output
bitcoind.generate_block(5, wait_for_mempool=0)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
bitcoind.generate_block(10, wait_for_mempool=2)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX',
'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
# Make sure we've broadcast the tx we expect (other channels shutting down can create
# unrelated txs!)
# In theory this could have occurred before all the previous loglines appeared.
l3.daemon.logsearch_start = 0
line = l3.daemon.wait_for_log(r'Broadcasting OUR_PENALTY_TX \([0-9a-f]*\) to resolve THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
tx = re.search(r'\(([0-9a-f]*)\)', line).group(1)
txid = bitcoind.rpc.decoderawtransaction(tx)['txid']
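    # generate_block's wait_for_mempool also accepts a list of txids, so below
    # we wait for this specific penalty tx rather than racing unrelated broadcasts.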
bitcoind.generate_block(1, wait_for_mempool=[txid])
l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'F'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'C')],
'C': [('external', ['penalized'], None, None)],
'E': [('cid1', ['htlc_tx'], ['to_wallet'], 'G')],
'F': [('wallet', ['deposit'], None, None)],
'G': [('wallet', ['deposit'], None, None)]
}
expected_3 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('external', ['stolen'], None, None), ('external', ['htlc_timeout'], ['htlc_timeout'], 'C')],
'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
'D': [('wallet', ['deposit'], None, None)],
'E': [('external', ['stolen'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_3['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
expected_3['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed.
'''
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
'watchtime-blocks': to_self_delay,
'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
channel_id = first_channel_id(l1, l2)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
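    # The mock intercepts lightningd's sendrawtransaction calls at the bitcoind
    # rpc proxy and pretends they succeeded, so none of l2's broadcasts reach
    # the real mempool -- i.e. the miners are "censoring" l2.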
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
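    # (despite the `self` name, the first argument is just the node whose log we
    # scrape; the captured group is the raw hex of the freshly RBF'd penalty tx,
    # which we rebroadcast ourselves later.)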
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 8):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# l2 should RBF, twice even, one for the l1 main output,
# one for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the order in which l2 generated RBF transactions
# would be acceptable to Bitcoin.
for tx in rbf_txes:
        # Send via lightningd's own sendrawtransaction call so that the
        # bcli interface gets exercised as well.
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# And l2 should consider it in its listfunds.
assert(len(l2.rpc.listfunds()['outputs']) >= 1)
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
check_utxos_channel(l2, [channel_id], expected_2)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed and we are willing to burn
it all up to spite the thief.
'''
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding lightningd
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
'watchtime-blocks': to_self_delay,
'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
channel_id = first_channel_id(l1, l2)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 10):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# l2 should RBF, twice even, one for the l1 main output,
# one for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the last two txes can be broadcast.
# These should donate the total amount to miners.
rbf_txes = rbf_txes[-2:]
for tx in rbf_txes:
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# l2 donated it to the miners, so it owns nothing
assert(len(l2.rpc.listfunds()['outputs']) == 0)
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_miner'], 'C'), ('cid1', ['penalty'], ['to_miner'], 'D')],
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
check_utxos_channel(l2, [channel_id], expected_2)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where opener immediately drops to chain"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
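    # 'permfail' is a dev_disconnect action that makes the node fail the channel
    # permanently at that point, i.e. drop its commitment to chain (the
    # wait_for_log('permfail') below relies on this).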
# Make locktime different, as we once had them reversed!
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'plugin': coin_mvt_plugin},
{'watchtime-blocks': 10,
'plugin': coin_mvt_plugin}],
fundchannel=False)
l1.fundwallet(10**7)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 blocks later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 94 blocks later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
channel_id = first_channel_id(l1, l2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
    # 10 blocks later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500)},
{'watchtime-blocks': 201, 'cltv-delta': 101}])
inv = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1, wait_for_mempool=1)
    # Wait for nodes to notice the failure, this search needle is after the
# DB commit so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Must be dust!
inv = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=1)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=2)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(TIMEOUT)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
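    # Each output in these graphs is, roughly, a tuple of
    # (account, tags on the event that created it, tags on the event that
    # spent it, label of the spending tx) -- a sketch of the layout
    # check_utxos_channel compares against.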
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)]
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
# We use a subset of tags in expected_2 that are used in expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
# Passing the same tags in to the check again will verify that the
# txids 'unify' across both event sets (in other words, we're talking
    # about the same txs when we say 'A' in each)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_simple(node_factory, bitcoind):
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin,
'disconnect': disconnects},
{}])
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_2 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], ((['withdrawal'], 'F'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'1': [('wallet', ['deposit'], ['withdrawal'], 'F')],
'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('external', ['to_them'], None, None)],
'C': [('wallet', ['deposit'], None, None)],
'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)],
'F': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
}
expected_1 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('wallet', ['channel_close'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
chan2_id = first_channel_id(l2, l3)
tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
""" This is the same as test_onchain_middleman, except that
node l1 drops to chain, not l2, reversing the unilateral
handling logic """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
'disconnect': l1_disconnects},
{'plugin': coin_mvt_plugin,
'disconnect': l2_disconnects},
{}])
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
c12, _ = l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Make sure l3 sees gossip for channel now; it can get upset
# and give bad gossip msg if channel is closed before it sees
# node announcement.
wait_for(lambda: l3.rpc.listchannels(c12)['channels'] != [])
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l1 will drop to chain.
l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, immediately
l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
l1.bitcoin.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l1 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_2 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], ((['withdrawal'], 'D'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'1': [('wallet', ['deposit'], ['withdrawal'], 'D')],
'B': [('external', ['to_them'], None, None), ('wallet', ['channel_close'], None, None), ('cid1', ['htlc_fulfill'], ['to_wallet'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
}
expected_1 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
chan2_id = first_channel_id(l2, l3)
tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
""" Very similar to the test_onchain_middleman, except there's no
middleman, we simply want to check that our offered htlc
on their unilateral returns to us (and is accounted
for correctly) """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
{'disconnect': disconnects,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
assert len(route) == 1
q = queue.Queue()
def try_pay():
try:
# rhash is fake (so is payment_secret)
rhash = 'B1' * 32
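            # 'B1' * 32 is 64 hex chars, i.e. a well-formed 32-byte hash
            # that l2 has no invoice for.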
l1.rpc.sendpay(route, rhash, payment_secret=rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l2.daemon.wait_for_log('sendrawtx exit 0')
l2.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
    # l1 should wait till to_self_delay (10), then reclaim the HTLC onchain
l2.bitcoin.generate_block(9)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# 100 blocks after last spend, l1+l2 should be done.
l2.bitcoin.generate_block(100)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l2, channel_id) == 0
assert account_balance(l1, channel_id) == 0
# Graph of coin_move events we expect
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['htlc_timeout'], ['to_wallet'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
"""We keep spending info around for their unilateral closes.
Make sure we show the address.
"""
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # FIXME: We can get warnings from unilateral closes, since we treat
    # such errors as soft because of LND.
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin,
"allow_warning": True},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
    # listfunds will show 1 change output, and the channels.
assert len([o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]) == 1
l1.stop()
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(100)
l1.start()
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
assert all(['address' in o for o in l1.rpc.listfunds()['outputs']])
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[
{
'may_reconnect': True,
'allow_warning': True,
}, {
'may_reconnect': True,
'disconnect': disconnects,
}
])
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@pytest.mark.skip("Lisa, please fix this!")
@pytest.mark.developer("needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None,
'plugin': coin_mvt_plugin},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')
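    # (The regex accepts any estimate from 50000 through 99999.)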
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['htlc_timeout'], ['ignored'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 11000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
inv = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)
h = inv['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
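    # Both end nodes issue an invoice for the same preimage, so the same
    # payment_hash can be routed in either direction.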
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# We send second HTLC from different node, since they refuse to send
# multiple with same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
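    # We are already at depth 5, so 16 more blocks reaches the depth-21 deadline.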
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
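    # We are at depth 25 here (21 + 1 + 3), so two more blocks reaches 27.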
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects,
feerates=(7500, 7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('Handed peer, entering loop')
l2.fundchannel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = scriptpubkey_addr(txout['scriptPubKey'])
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@pytest.mark.developer("needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not node_factory.valgrind:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
@flaky
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
    # There's a workaround in channeld: it treats incoming errors
    # received before both sides are locked in as warnings; this happens in
    # this test, so l1 reports the error as a warning!
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
    # This will block, as l1 will send an error but l2 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
# the above used to be keyidx + 3, but that was when `fundchannel`
# used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
# one address in the discarded tx.
# Now we use PSBTs, which means we never discard and skip an address.
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.fundchannel(l2, 1000000, False)
@pytest.mark.developer("needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
"""
Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind
"""
l1 = node_factory.get_node(allow_warning=True)
# BOLT #2:
# 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
# * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
# (witness program versions 1 through 16)
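    # e.g. '51020000' is OP_1 (0x51) followed by a 2-byte push (0x02) of
    # zeros; 0x60 is OP_16, and 0x28 pushes 40 bytes.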
edge_valid = ['51020000', '5128' + '00' * 0x28,
'60020000', '6028' + '00' * 0x28]
other_valid = ['52020000', '5228' + '00' * 0x28,
'53020000', '5328' + '00' * 0x28,
'54020000', '5428' + '00' * 0x28,
'55020000', '5528' + '00' * 0x28,
'56020000', '5628' + '00' * 0x28,
'57020000', '5728' + '00' * 0x28,
'58020000', '5828' + '00' * 0x28,
'59020000', '5928' + '00' * 0x28,
'5A020000', '5A28' + '00' * 0x28,
'5B020000', '5B28' + '00' * 0x28,
'5C020000', '5C28' + '00' * 0x28,
'5D020000', '5D28' + '00' * 0x28,
'5E020000', '5E28' + '00' * 0x28,
'5F020000', '5F28' + '00' * 0x28]
invalid = ['50020000', # Not OP_1-OP_16
'61020000', # Not OP_1-OP_16
'5102000000', # Extra bytes
'510100', # Too short
'5129' + '00' * 0x29] # Too long
# Don't stress CI; just test edge cases
if SLOW_MACHINE:
valid = edge_valid
else:
valid = edge_valid + other_valid
# More efficient to create them all up-front.
nodes = node_factory.get_nodes(len(valid) + len(invalid))
# Give it one UTXO to spend for each node.
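    # (10**6 + 100000) sat is 0.011 BTC each: the 10**6 sat channel amount
    # plus some headroom for fees.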
addresses = {}
for n in nodes:
addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
bitcoind.rpc.sendmany("", addresses)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))
# FIXME: Since we don't support other non-v0 encodings, we need a protocol
# test for this (we're actually testing our upfront check, not the real
    # shutdown one!).
for script in valid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
for script in invalid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.rpc.fundchannel(l2.info['id'], 10**6)
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs anchor_outputs")
@pytest.mark.developer("needs to set dev-disconnect")
def test_closing_higherfee(node_factory, bitcoind, executor):
"""With anchor outputs we can ask for a *higher* fee than the last commit tx"""
# We change the feerate before it starts negotiating close, so it aims
# for *higher* than last commit tx.
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500),
'disconnect': ['-WIRE_CLOSING_SIGNED']},
{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500)}])
# This will trigger disconnect.
fut = executor.submit(l1.rpc.close, l2.info['id'])
l1.daemon.wait_for_log('dev_disconnect')
# Now adjust fees so l1 asks for more on reconnect.
l1.set_feerates((30000,) * 4, False)
l2.set_feerates((30000,) * 4, False)
l1.restart()
l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# This causes us to *exceed* previous requirements!
l1.daemon.wait_for_log(r'deriving max fee from rate 30000 -> 16440sat \(not 1000000sat\)')
# This will fail because l1 restarted!
with pytest.raises(RpcError, match=r'Channel forgotten before proper close.'):
fut.result(TIMEOUT)
# But we still complete negotiation!
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
@pytest.mark.developer("needs dev_disconnect")
def test_htlc_rexmit_while_closing(node_factory, executor):
"""Retranmitting an HTLC revocation while shutting down should work"""
# l1 disconnects after sending second COMMITMENT_SIGNED.
# Then it stops receiving after sending WIRE_SHUTDOWN (which is before it
# reads the revoke_and_ack).
disconnects = ['+WIRE_COMMITMENT_SIGNED*2',
'xWIRE_SHUTDOWN']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
# Start payment, will disconnect
l1.pay(l2, 200000)
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
# Tell it to close (will block)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# Original problem was with multiple disconnects, but to simplify we make
# l2 send shutdown too.
fut2 = executor.submit(l2.rpc.close, l1.info['id'])
# Reconnect, shutdown will continue disconnect again
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
# WIRE_REVOKE_AND_ACK.
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
# They don't realize they're not talking, so disconnect and reconnect.
l1.rpc.disconnect(l2.info['id'], force=True)
# Now it hangs, since l1 is expecting rexmit of revoke-and-ack.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
fut2.result(TIMEOUT)
@pytest.mark.openchannel('v1')
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel(node_factory, executor):
"""Ideally you'd keep talking to us about closed channels: simple"""
disconnects = ['xWIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 reconnects, it should succeed.
l1.rpc.disconnect(l2.info['id'], force=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
"""Ideally you'd keep talking to us about closed channels: even if close is mined"""
disconnects = ['xWIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 does not see any new blocks.
def no_new_blocks(req):
return {"result": {"blockhash": None, "block": None}}
l1.daemon.rpcproxy.mock_rpc('getrawblockbyheight', no_new_blocks)
# Close transaction mined
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
# l1 reconnects, it should succeed.
# l1 will disconnect once it sees block
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
@pytest.mark.developer("too slow without fast polling for blocks")
def test_segwit_anyshutdown(node_factory, bitcoind, executor):
"""Try a range of future segwit versions for shutdown"""
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1.fundwallet(10**7)
# Based on BIP-320, but all changed to regtest.
addrs = ("BCRT1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KYGT080",
"bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry",
"bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56",
"BCRT1SW50QT2UWHA",
"bcrt1zw508d6qejxtdg4y5r3zarvaryv2wuatf",
"bcrt1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvseswlauz7",
"bcrt1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesyga46z",
"bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")
for addr in addrs:
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
# If we don't actually make a payment, two of the above cases fail
# because the resulting tx is too small! Balance channel so close
# has two outputs.
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
l1.pay(l2, 10**9 // 2)
l1.rpc.close(l2.info['id'], destination=addr)
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
@pytest.mark.developer("needs to manipulate features")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
def test_anysegwit_close_needs_feature(node_factory, bitcoind):
"""Rather than have peer reject our shutdown, we should refuse to shutdown toa v1+ address if they don't support it"""
# L2 says "no option_shutdown_anysegwit"
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
{'may_reconnect': True,
'dev-force-features': -27}])
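# (A negative value for 'dev-force-features' is presumably how the test
# strips feature bit 27, option_shutdown_anysegwit, from l2's feature set.)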
with pytest.raises(RpcError, match=r'Peer does not allow v1\+ shutdown addresses'):
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
# From TFM: "Tell your friends to upgrade!"
l2.stop()
del l2.daemon.opts['dev-force-features']
l2.start()
# Now it will work!
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
bitcoind.generate_block(1, wait_for_mempool=1)
def test_close_feerate_range(node_factory, bitcoind, chainparams):
"""Test the quick-close fee range negotiation"""
l1, l2 = node_factory.line_graph(2)
notifications = []
def save_notifications(message, progress, request, **kwargs):
notifications.append(message)
# Lowball the range here.
with l1.rpc.notify(save_notifications):
l1.rpc.close(l2.info['id'], feerange=['253perkw', 'normal'])
if not chainparams['elements']:
l1_range = [138, 4110]
l2_range = [1027, 1000000]
else:
# That fee output is a little chunky.
l1_range = [220, 6547]
l2_range = [1636, 1000000]
l1.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l1_range[0], l1_range[1]))
l2.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l2_range[0], l2_range[1]))
overlap = [max(l1_range[0], l2_range[0]), min(l1_range[1], l2_range[1])]
l1.daemon.wait_for_log('performing quickclose in range {}sat-{}sat'.format(overlap[0], overlap[1]))
log = l1.daemon.is_in_log('Their actual closing tx fee is .*sat')
rate = re.match('.*Their actual closing tx fee is ([0-9]*sat).*', log).group(1)
assert notifications == ['Sending closing fee offer {}, with range {}sat-{}sat'.format(rate,
l1_range[0],
l1_range[1]),
'Received closing fee offer {}, with range {}sat-{}sat'.format(rate,
l2_range[0],
l2_range[1])]
def test_close_twice(node_factory, executor):
# First feerate is too low, second fixes it.
l1, l2 = node_factory.line_graph(2, opts=[{'allow_warning': True,
'may_reconnect': True},
{'allow_warning': True,
'may_reconnect': True,
'feerates': (15000, 15000, 15000, 15000)}])
# This makes it disconnect, since feerate is too low.
fut = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '500perkw'])
l1.daemon.wait_for_log('WARNING.*Unable to agree on a feerate')
fut2 = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '15000perkw'])
# Now reconnect, it should work.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert fut.result(TIMEOUT)['type'] == 'mutual'
assert fut2.result(TIMEOUT)['type'] == 'mutual'
def test_close_weight_estimate(node_factory, bitcoind):
"""closingd uses the expected closing tx weight to constrain fees; make sure that lightningd agrees
once it has the actual agreed tx"""
l1, l2 = node_factory.line_graph(2)
l1.rpc.close(l2.info['id'])
# Closingd gives this estimate before it begins
log = l1.daemon.wait_for_log('Expected closing weight = ')
expected_weight = int(re.match('.*Expected closing weight = ([0-9]*),.*', log).group(1))
# This is the actual weight: in theory this could use their
# actual sig, and thus vary, but we don't do that.
log = l1.daemon.wait_for_log('Their actual closing tx fee is')
actual_weight = int(re.match('.*: weight is ([0-9]*).*', log).group(1))
assert actual_weight == expected_weight
log = l1.daemon.wait_for_log('sendrawtransaction: ')
tx = re.match('.*sendrawtransaction: ([0-9a-f]*).*', log).group(1)
# This could actually be a bit shorter: 1 in 256 chance we get
# lucky with a sig and it's shorter. We have 2 sigs, so that's
# 1 in 128. Unlikely to do better than 2 bytes off though!
signed_weight = int(bitcoind.rpc.decoderawtransaction(tx)['weight'])
assert signed_weight <= actual_weight
assert signed_weight >= actual_weight - 2
|
channel.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep, time
import pandas as pd
from threading import Thread
import requests
import re
from random import random
# choose a driver
wd = webdriver.Chrome()
# specify the wait time (in seconds) for a new page to be fully loaded
wait_time_for_loading = 1
ytv_list = []
links_titles = []
def crawl_page(url):
wd.get(url)
def find_class_name(cname, plural=False):
if plural is False:
return wd.find_element_by_class_name(cname)
else:
return wd.find_elements_by_class_name(cname)
def get_text_by_class_name(cname, plural=False):
if plural is False:
return find_class_name(cname).text
else:
texts = [t.text for t in find_class_name(cname, True)]
return texts
def get_tar_txt(regex, src_txt):
text = re.findall(regex, src_txt)
if len(text) != 0:
text = text[0] if len(text) == 1 or text[0] != text[-1] else text[1]
else:
text = ""
return text
def get_digits(strg):
num = ''.join(s for s in strg if s.isdigit())
if len(num) != 0:
return float(num)
else:
return 0
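# Illustrative examples (not part of the original script):
#   get_tar_txt(r'[\d,]+(?= likes)', '1,234 likes')  -> '1,234'
#   get_digits('1,234 likes')                        -> 1234.0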
def multi_tasking(func, a_list, length, speed):
threads = []
for i in range(0, length, speed):
t = Thread(target=func, args=(a_list[i:i + speed],))  # slicing clamps to the end of the list
t.start()
threads.append(t)
for t in threads:
t.join()
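# Usage sketch: multi_tasking() chunks `a_list` into slices of `speed` items,
# runs `func` on each chunk in its own thread and joins them all, e.g.
# multi_tasking(get_links_titles, videos, len(videos), 100) handles 100
# videos per thread (see run() below).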
def channel_info():
sleep(wait_time_for_loading)
channel_name = get_text_by_class_name('style-scope ytd-channel-name')
sub_num = wd.find_element_by_xpath('//*[@id="subscriber-count"]').text
return channel_name, sub_num
def scroll_down(scrolling_limit=50):
videos = find_class_name('style-scope ytd-grid-video-renderer', True)
counter = 0
while True:
wd.find_element_by_tag_name('body').send_keys(Keys.END)
sleep(wait_time_for_loading)
videos_new = find_class_name('style-scope ytd-grid-video-renderer', True)
counter += 1
if len(videos) != len(videos_new) and counter != scrolling_limit:
videos = videos_new
print("Scrolling..." if random() > 0.5 else "..........")
else:
break
def get_links_titles(videos_):
for video in videos_:
v_info = video.find_element_by_xpath('.//*[@id="video-title"]')
link = v_info.get_attribute('href')
title = v_info.text
links_titles.append([link, title])
print("Processing..." if random() > 0.5 else "..........")
def get_videos_info(links_titles_):
for link_title in links_titles_:
link = link_title[0]
title = link_title[1]
r = requests.get(link)
when = get_tar_txt(r'[A-Z][a-z]{2} \d{1,}, [\d]{4}', r.text)
views = get_digits(get_tar_txt(r'(?<="viewCount":{"simpleText":")[\d,]+(?= views)', r.text))
likes = get_digits(get_tar_txt(r'[\d,]+(?= likes)', r.text))
dislikes = get_digits(get_tar_txt(r'[\d,]+(?= dislikes)', r.text))
ratio = "" if dislikes == 0 else likes / dislikes
ytv_list.append([title, when, views, likes, dislikes, (likes - dislikes), ratio, link])
print("Processing..." if random() > 0.5 else "..........")
def get_links():
links = [l[-1] for l in ytv_list[1:]]
return links
def get_views():
views = [v[2] for v in ytv_list[1:]]
return views
def run(url_):
global links_titles, ytv_list
start = time()
links_titles = []
ytv_list = [['Title', 'Posted on', 'Views', 'Likes', 'Dislikes', 'Difference(L-D)', 'Ratio(L/D)', 'Link']]
crawl_page(url_)
ch_info = channel_info()
filename = f'{ch_info[0]}({ch_info[1]}).xlsx'
scroll_down(1)
videos = find_class_name('style-scope ytd-grid-video-renderer', True)
multi_tasking(get_links_titles, videos, len(videos), 100)
print('Creating file....')
multi_tasking(get_videos_info, links_titles, len(links_titles), 20)
pd.DataFrame(ytv_list).to_excel(filename)
print(f'File {filename} created!')
end = time()
print("Total time taken: " + str((end-start)))
def quit_out():
wd.quit()
def main():
urls = ["A list of links to YouTube Channels' VIDEOS webpage..."]
for url in urls:
try:
run(url)
except Exception:  # skip channels that fail and move on to the next URL
continue
quit_out()
if __name__ == '__main__':
main()
|
test_event_processor.py
|
import json
import pytest
from threading import Thread
import time
import uuid
from ldclient.config import Config
from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator
from ldclient.event_processor import DefaultEventProcessor
from ldclient.util import log
from testing.http_util import start_server, BasicResponse
from testing.stub_util import MockResponse, MockHttp
default_config = Config()
user = {
'key': 'userkey',
'name': 'Red'
}
filtered_user = {
'key': 'userkey',
'privateAttrs': [ 'name' ]
}
numeric_user = {
'key': 1,
'secondary': 2,
'ip': 3,
'country': 4,
'email': 5,
'firstName': 6,
'lastName': 7,
'avatar': 8,
'name': 9,
'anonymous': False,
'custom': {
'age': 99
}
}
stringified_numeric_user = {
'key': '1',
'secondary': '2',
'ip': '3',
'country': '4',
'email': '5',
'firstName': '6',
'lastName': '7',
'avatar': '8',
'name': '9',
'anonymous': False,
'custom': {
'age': 99
}
}
ep = None
mock_http = None
def setup_function():
global mock_http
mock_http = MockHttp()
def teardown_function():
if ep is not None:
ep.stop()
class DefaultTestProcessor(DefaultEventProcessor):
def __init__(self, **kwargs):
if 'diagnostic_opt_out' not in kwargs:
kwargs['diagnostic_opt_out'] = True
config = Config(**kwargs)
diagnostic_accumulator = _DiagnosticAccumulator(create_diagnostic_id(config))
DefaultEventProcessor.__init__(self, config, mock_http, diagnostic_accumulator = diagnostic_accumulator)
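# DefaultTestProcessor plugs the module-level MockHttp stub into a real
# DefaultEventProcessor (diagnostics off unless a test opts in), so tests can
# inspect the delivered payloads via flush_and_get_events() below.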
def test_identify_event_is_queued():
with DefaultTestProcessor() as ep:
e = { 'kind': 'identify', 'user': user }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
assert output == [{
'kind': 'identify',
'creationDate': e['creationDate'],
'key': user['key'],
'user': user
}]
def test_user_is_filtered_in_identify_event():
with DefaultTestProcessor(all_attributes_private = True) as ep:
e = { 'kind': 'identify', 'user': user }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
assert output == [{
'kind': 'identify',
'creationDate': e['creationDate'],
'key': user['key'],
'user': filtered_user
}]
def test_user_attrs_are_stringified_in_identify_event():
with DefaultTestProcessor() as ep:
e = { 'kind': 'identify', 'user': numeric_user }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
assert output == [{
'kind': 'identify',
'creationDate': e['creationDate'],
'key': stringified_numeric_user['key'],
'user': stringified_numeric_user
}]
def test_individual_feature_event_is_queued_with_index_event():
with DefaultTestProcessor() as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 3
check_index_event(output[0], e, user)
check_feature_event(output[1], e, False, None)
check_summary_event(output[2])
def test_user_is_filtered_in_index_event():
with DefaultTestProcessor(all_attributes_private = True) as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 3
check_index_event(output[0], e, filtered_user)
check_feature_event(output[1], e, False, None)
check_summary_event(output[2])
def test_user_attrs_are_stringified_in_index_event():
with DefaultTestProcessor() as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 3
check_index_event(output[0], e, stringified_numeric_user)
check_feature_event(output[1], e, False, None)
check_summary_event(output[2])
def test_feature_event_can_contain_inline_user():
with DefaultTestProcessor(inline_users_in_events = True) as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 2
check_feature_event(output[0], e, False, user)
check_summary_event(output[1])
def test_user_is_filtered_in_feature_event():
with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 2
check_feature_event(output[0], e, False, filtered_user)
check_summary_event(output[1])
def test_user_attrs_are_stringified_in_feature_event():
with DefaultTestProcessor(inline_users_in_events = True) as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 2
check_feature_event(output[0], e, False, stringified_numeric_user)
check_summary_event(output[1])
def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked():
with DefaultTestProcessor(inline_users_in_events = True) as ep:
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': False
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e, user)
check_summary_event(output[1])
def test_two_events_for_same_user_only_produce_one_index_event():
with DefaultTestProcessor(user_keys_flush_interval = 300) as ep:
e0 = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
e1 = e0.copy()
ep.send_event(e0)
ep.send_event(e1)
output = flush_and_get_events(ep)
assert len(output) == 4
check_index_event(output[0], e0, user)
check_feature_event(output[1], e0, False, None)
check_feature_event(output[2], e1, False, None)
check_summary_event(output[3])
def test_new_index_event_is_added_if_user_cache_has_been_cleared():
with DefaultTestProcessor(user_keys_flush_interval = 0.1) as ep:
e0 = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
e1 = e0.copy()
ep.send_event(e0)
time.sleep(0.2)
ep.send_event(e1)
output = flush_and_get_events(ep)
assert len(output) == 5
check_index_event(output[0], e0, user)
check_feature_event(output[1], e0, False, None)
check_index_event(output[2], e1, user)
check_feature_event(output[3], e1, False, None)
check_summary_event(output[4])
def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode():
with DefaultTestProcessor() as ep:
future_time = now() + 100000
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default',
'trackEvents': False, 'debugEventsUntilDate': future_time
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 3
check_index_event(output[0], e, user)
check_feature_event(output[1], e, True, user)
check_summary_event(output[2])
def test_event_can_be_both_tracked_and_debugged():
with DefaultTestProcessor() as ep:
future_time = now() + 100000
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default',
'trackEvents': True, 'debugEventsUntilDate': future_time
}
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 4
check_index_event(output[0], e, user)
check_feature_event(output[1], e, False, None)
check_feature_event(output[2], e, True, user)
check_summary_event(output[3])
def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time():
with DefaultTestProcessor() as ep:
# Pick a server time that is somewhat behind the client time
server_time = now() - 20000
# Send and flush an event we don't care about, just to set the last server time
mock_http.set_server_time(server_time)
ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }})
flush_and_get_events(ep)
# Now send an event with debug mode on, with a "debug until" time that is further in
# the future than the server time, but in the past compared to the client.
debug_until = server_time + 1000
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default',
'trackEvents': False, 'debugEventsUntilDate': debug_until
}
ep.send_event(e)
# Should get a summary event only, not a full feature event
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e, user)
check_summary_event(output[1])
def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_client_time():
with DefaultTestProcessor() as ep:
# Pick a server time that is somewhat ahead of the client time
server_time = now() + 20000
# Send and flush an event we don't care about, just to set the last server time
mock_http.set_server_time(server_time)
ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }})
flush_and_get_events(ep)
# Now send an event with debug mode on, with a "debug until" time that is further in
# the future than the client time, but in the past compared to the server.
debug_until = server_time - 1000
e = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default',
'trackEvents': False, 'debugEventsUntilDate': debug_until
}
ep.send_event(e)
# Should get a summary event only, not a full feature event
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e, user)
check_summary_event(output[1])
def test_two_feature_events_for_same_user_generate_only_one_index_event():
with DefaultTestProcessor() as ep:
e1 = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value1', 'default': 'default', 'trackEvents': False
}
e2 = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 2, 'value': 'value2', 'default': 'default', 'trackEvents': False
}
ep.send_event(e1)
ep.send_event(e2)
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e1, user)
check_summary_event(output[1])
def test_nontracked_events_are_summarized():
with DefaultTestProcessor() as ep:
e1 = {
'kind': 'feature', 'key': 'flagkey1', 'version': 11, 'user': user,
'variation': 1, 'value': 'value1', 'default': 'default1', 'trackEvents': False
}
e2 = {
'kind': 'feature', 'key': 'flagkey2', 'version': 22, 'user': user,
'variation': 2, 'value': 'value2', 'default': 'default2', 'trackEvents': False
}
ep.send_event(e1)
ep.send_event(e2)
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e1, user)
se = output[1]
assert se['kind'] == 'summary'
assert se['startDate'] == e1['creationDate']
assert se['endDate'] == e2['creationDate']
assert se['features'] == {
'flagkey1': {
'default': 'default1',
'counters': [ { 'version': 11, 'variation': 1, 'value': 'value1', 'count': 1 } ]
},
'flagkey2': {
'default': 'default2',
'counters': [ { 'version': 22, 'variation': 2, 'value': 'value2', 'count': 1 } ]
}
}
def test_custom_event_is_queued_with_user():
with DefaultTestProcessor() as ep:
e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '}, 'metricValue': 1.5 }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 2
check_index_event(output[0], e, user)
check_custom_event(output[1], e, None)
def test_custom_event_can_contain_inline_user():
with DefaultTestProcessor(inline_users_in_events = True) as ep:
e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
check_custom_event(output[0], e, user)
def test_user_is_filtered_in_custom_event():
with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep:
e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
check_custom_event(output[0], e, filtered_user)
def test_user_attrs_are_stringified_in_custom_event():
with DefaultTestProcessor(inline_users_in_events = True) as ep:
e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} }
ep.send_event(e)
output = flush_and_get_events(ep)
assert len(output) == 1
check_custom_event(output[0], e, stringified_numeric_user)
def test_nothing_is_sent_if_there_are_no_events():
with DefaultTestProcessor() as ep:
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_data is None
def test_sdk_key_is_sent():
with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_headers.get('Authorization') == 'SDK_KEY'
def test_wrapper_header_not_sent_when_not_set():
with DefaultTestProcessor() as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') is None
def test_wrapper_header_sent_when_set():
with DefaultTestProcessor(wrapper_name = "Flask", wrapper_version = "0.0.1") as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask/0.0.1"
def test_wrapper_header_sent_without_version():
with DefaultTestProcessor(wrapper_name = "Flask") as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask"
def test_event_schema_set_on_event_send():
with DefaultTestProcessor() as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') == "3"
def test_sdk_key_is_sent_on_diagnostic_request():
with DefaultTestProcessor(sdk_key = 'SDK_KEY', diagnostic_opt_out=False) as ep:
ep._wait_until_inactive()
assert mock_http.request_headers.get('Authorization') == 'SDK_KEY'
def test_event_schema_not_set_on_diagnostic_send():
with DefaultTestProcessor(diagnostic_opt_out=False) as ep:
ep._wait_until_inactive()
assert mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') is None
def test_init_diagnostic_event_sent():
with DefaultTestProcessor(diagnostic_opt_out=False) as ep:
diag_init = flush_and_get_events(ep)
# Fields are tested in test_diagnostics.py
assert len(diag_init) == 6
assert diag_init['kind'] == 'diagnostic-init'
def test_periodic_diagnostic_includes_events_in_batch():
with DefaultTestProcessor(diagnostic_opt_out=False) as ep:
# Ignore init event
flush_and_get_events(ep)
# Send a payload with a single event
ep.send_event({ 'kind': 'identify', 'user': user })
flush_and_get_events(ep)
ep._send_diagnostic()
diag_event = flush_and_get_events(ep)
assert len(diag_event) == 8
assert diag_event['kind'] == 'diagnostic'
assert diag_event['eventsInLastBatch'] == 1
assert diag_event['deduplicatedUsers'] == 0
def test_periodic_diagnostic_includes_deduplicated_users():
with DefaultTestProcessor(diagnostic_opt_out=False) as ep:
# Ignore init event
flush_and_get_events(ep)
# Send two eval events with the same user to cause a user deduplication
e0 = {
'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True
}
e1 = e0.copy()
ep.send_event(e0)
ep.send_event(e1)
flush_and_get_events(ep)
ep._send_diagnostic()
diag_event = flush_and_get_events(ep)
assert len(diag_event) == 8
assert diag_event['kind'] == 'diagnostic'
assert diag_event['eventsInLastBatch'] == 3
assert diag_event['deduplicatedUsers'] == 1
def test_no_more_payloads_are_sent_after_401_error():
verify_unrecoverable_http_error(401)
def test_no_more_payloads_are_sent_after_403_error():
verify_unrecoverable_http_error(403)
def test_will_still_send_after_408_error():
verify_recoverable_http_error(408)
def test_will_still_send_after_429_error():
verify_recoverable_http_error(429)
def test_will_still_send_after_500_error():
verify_recoverable_http_error(500)
def test_does_not_block_on_full_inbox():
config = Config(events_max_pending=1) # this sets the size of both the inbox and the outbox to 1
ep_inbox_holder = [ None ]
ep_inbox = None
def dispatcher_factory(inbox, config, http, diag):
ep_inbox_holder[0] = inbox # it's an array because otherwise it's hard for a closure to modify a variable
return None # the dispatcher object itself doesn't matter, we only manipulate the inbox
def event_consumer():
while True:
message = ep_inbox.get(block=True)
if message.type == 'stop':
message.param.set()
return
def start_consuming_events():
Thread(target=event_consumer).start()
with DefaultEventProcessor(config, mock_http, dispatcher_factory) as ep:
ep_inbox = ep_inbox_holder[0]
event1 = { 'kind': 'custom', 'key': 'event1', 'user': user }
event2 = { 'kind': 'custom', 'key': 'event2', 'user': user }
ep.send_event(event1)
ep.send_event(event2) # this event should be dropped - inbox is full
message1 = ep_inbox.get(block=False)
had_no_more = ep_inbox.empty()
start_consuming_events()
assert message1.param == event1
assert had_no_more
def test_can_use_http_proxy_via_environment_var(monkeypatch):
with start_server() as server:
monkeypatch.setenv('http_proxy', server.uri)
config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', diagnostic_opt_out = True)
_verify_http_proxy_is_used(server, config)
def test_can_use_https_proxy_via_environment_var(monkeypatch):
with start_server() as server:
monkeypatch.setenv('https_proxy', server.uri)
config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', diagnostic_opt_out = True)
_verify_https_proxy_is_used(server, config)
def test_can_use_http_proxy_via_config():
with start_server() as server:
config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', http_proxy=server.uri, diagnostic_opt_out = True)
_verify_http_proxy_is_used(server, config)
def test_can_use_https_proxy_via_config():
with start_server() as server:
config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', http_proxy=server.uri, diagnostic_opt_out = True)
_verify_https_proxy_is_used(server, config)
def _verify_http_proxy_is_used(server, config):
server.for_path(config.events_uri + '/bulk', BasicResponse(200))
with DefaultEventProcessor(config) as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
# For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the
# HTTP client, so we should be able to see the request go through. Note that the URI path will
# actually be an absolute URI for a proxy request.
req = server.require_request()
assert req.method == 'POST'
def _verify_https_proxy_is_used(server, config):
server.for_path(config.events_uri + '/bulk', BasicResponse(200))
with DefaultEventProcessor(config) as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
# Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but
# it can still record that it *got* the request, which proves that the request went to the proxy.
req = server.require_request()
assert req.method == 'CONNECT'
def verify_unrecoverable_http_error(status):
with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep:
mock_http.set_response_status(status)
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
mock_http.reset()
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_data is None
def verify_recoverable_http_error(status):
with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep:
mock_http.set_response_status(status)
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
mock_http.reset()
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
assert mock_http.request_data is not None
def test_event_payload_id_is_sent():
with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
headerVal = mock_http.request_headers.get('X-LaunchDarkly-Payload-ID')
assert headerVal is not None
# Throws on invalid UUID
uuid.UUID(headerVal)
def test_event_payload_id_changes_between_requests():
with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep:
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
ep.send_event({ 'kind': 'identify', 'user': user })
ep.flush()
ep._wait_until_inactive()
firstPayloadId = mock_http.recorded_requests[0][0].get('X-LaunchDarkly-Payload-ID')
secondPayloadId = mock_http.recorded_requests[1][0].get('X-LaunchDarkly-Payload-ID')
assert firstPayloadId != secondPayloadId
def flush_and_get_events(ep):
ep.flush()
ep._wait_until_inactive()
if mock_http.request_data is None:
raise AssertionError('Expected to get an HTTP request but did not get one')
else:
return json.loads(mock_http.request_data)
def check_index_event(data, source, user):
assert data['kind'] == 'index'
assert data['creationDate'] == source['creationDate']
assert data['user'] == user
def check_feature_event(data, source, debug, inline_user):
assert data['kind'] == ('debug' if debug else 'feature')
assert data['creationDate'] == source['creationDate']
assert data['key'] == source['key']
assert data.get('version') == source.get('version')
assert data.get('variation') == source.get('variation')
assert data.get('value') == source.get('value')
assert data.get('default') == source.get('default')
if inline_user is None:
assert data['userKey'] == str(source['user']['key'])
else:
assert data['user'] == inline_user
def check_custom_event(data, source, inline_user):
assert data['kind'] == 'custom'
assert data['creationDate'] == source['creationDate']
assert data['key'] == source['key']
assert data['data'] == source['data']
if inline_user is None:
assert data['userKey'] == source['user']['key']
else:
assert data['user'] == inline_user
assert data.get('metricValue') == source.get('metricValue')
def check_summary_event(data):
assert data['kind'] == 'summary'
def now():
return int(time.time() * 1000)
|
robot_controller_utils.py
|
import hebi
from math import pi
from time import sleep, time
import numpy as np
import threading
import sys
import trace
joint_state = {"base": 0, "shoulder": 0, "elbow": 0}
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill() method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
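# Usage sketch (illustrative): kill() only sets a flag; SystemExit is raised
# from localtrace() the next time the target thread executes a Python line,
# so a thread blocked inside a C call won't stop until it returns to Python.
#
#   t = KThread(target=some_loop)   # `some_loop` is a placeholder target
#   t.start()
#   ...
#   t.kill()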
class Hebi3dofArm(object):
def __init__(self):
self.family_name = "Test Family"
self.module_name = ["base", "shoulder", "elbow"]
def initialize_arm(self):
lookup = hebi.Lookup()
# Wait 2 seconds for the module list to populate
sleep(2.0)
self.group = lookup.get_group_from_names([self.family_name], self.module_name)
self.base = lookup.get_group_from_names([self.family_name], [self.module_name[0]])
self.shoulder = lookup.get_group_from_names([self.family_name], [self.module_name[1]])
self.elbow = lookup.get_group_from_names([self.family_name], [self.module_name[2]])
if self.group is None:
print('Group not found: Did you forget to set the module family and name above?')
exit(1)
self.group_feedback = hebi.GroupFeedback(self.group.size)
self.base_feedback = hebi.GroupFeedback(self.base.size)
self.shoulder_feedback = hebi.GroupFeedback(self.shoulder.size)
self.elbow_feedback = hebi.GroupFeedback(self.elbow.size)
if all(joint is None for joint in [self.group.get_next_feedback(reuse_fbk=self.group_feedback),
self.base.get_next_feedback(reuse_fbk=self.base_feedback),
self.shoulder.get_next_feedback(reuse_fbk=self.shoulder_feedback),
self.elbow.get_next_feedback(reuse_fbk=self.elbow_feedback)]):
print('Error getting feedback.')
exit(1)
# Start logging in the background
self.group.start_log('logs', mkdirs=True)
self.base_command = hebi.GroupCommand(self.base.size)
self.shoulder_command = hebi.GroupCommand(self.shoulder.size)
self.elbow_command = hebi.GroupCommand(self.elbow.size)
self.joints = {"base": ["base", self.base, self.base_feedback, self.base_command], "shoulder": ["shoulder", self.shoulder, self.shoulder_feedback, self.shoulder_command], "elbow": ["elbow", self.elbow, self.elbow_feedback, self.elbow_command]}
def rotate_joints(self, motor, angle):
current_joint_name, joint, joint_feedback, joint_command = motor
other_joints = [self.joints[joint] for joint in self.module_name if joint != current_joint_name]
positions = np.zeros((joint.size, 2), dtype=np.float64)
offset = [pi] * joint.size
current_pos = joint_feedback.position
positions[:, 0] = current_pos
positions[:, 1] = current_pos + float(angle)*pi/180
time_vector = [0, 3]
trajectory = hebi.trajectory.create_trajectory(time_vector, positions)
duration = trajectory.duration
start = time()
t = time() - start
joint_state[current_joint_name] = 1
while t < duration:
joint.get_next_feedback(reuse_fbk=joint_feedback)
t = time() - start
pos, vel, acc = trajectory.get_state(t)
joint_command.position = pos
joint_command.velocity = vel
joint.send_command(joint_command)
joint_state[current_joint_name] = 0
def handle_joints(self):
while True:
for joint, state in joint_state.items():
if not state:
current_joint_name, joint_group, joint_feedback, joint_command = self.joints[joint]
joint_command.position = joint_feedback.position
joint_group.send_command(joint_command)
class StateMachine(object):
def __init__(self):
self.arm = Hebi3dofArm()
self.states = {"HOME":[{'base': -30}, {'shoulder': -55}, {'elbow': 90}],
"C1":[{'base': 45}, {'shoulder': 30}, {'elbow': 365}],
"C2":[{'base': 70}, {'shoulder': 30}, {'elbow': 365}],
"C3":[{'base': 95}, {'shoulder': 30}, {'elbow': 365}],
"C4":[{'base': 120}, {'shoulder': 30}, {'elbow': 365}],
"C5":[{'base': 145}, {'shoulder': 30}, {'elbow': 365}],
"C6":[{'base': 170}, {'shoulder': 30}, {'elbow': 365}],
"C7":[{'base': 195}, {'shoulder': 30}, {'elbow': 365}]}
def go_home(self):
print("Robot Started: State transition to HOME")
base, shoulder, elbow = self.states["HOME"]
self.arm.rotate_joints(self.arm.joints["shoulder"], shoulder["shoulder"])
self.arm.rotate_joints(self.arm.joints["base"], base["base"])
self.arm.rotate_joints(self.arm.joints["elbow"], elbow["elbow"])
def state_transition(self, state):
print("State transition from HOME to {}".format(state))
base, shoulder, elbow = self.states[state]
self.arm.rotate_joints(self.arm.joints["base"], base["base"])
self.arm.rotate_joints(self.arm.joints["shoulder"], shoulder["shoulder"])
self.arm.rotate_joints(self.arm.joints["elbow"], elbow["elbow"])
print("State transition from {} to HOME".format(state))
self.arm.rotate_joints(self.arm.joints["shoulder"], -shoulder["shoulder"] - 10)
self.arm.rotate_joints(self.arm.joints["base"], -base["base"] - 1.5)
# self.arm.rotate_joints(self.arm.joints["elbow"], -elbow["elbow"])
def main(self):
try:
self.arm.initialize_arm()
thread = KThread(target=self.arm.handle_joints,)
thread.start()
self.go_home()
status = 1
while status:
print("Please enter which state to transition")
s = input()
if s == 'c':
status = 0
else:
self.state_transition(s)
thread.kill()
self.arm.group.stop_log()
except KeyboardInterrupt:
thread.kill()
sys.exit(0)
|
hockey_twitter_bot.py
|
# -*- coding: utf-8 -*-
"""
This module parses the NHL Schedule & Live Feed API endpoints
to gather relevant game events and tweet them to the
game bot Twitter account.
"""
# pylint: disable=C0103
# pylint: disable=wildcard-import, pointless-string-statement
# pylint: disable=too-many-statements, too-many-branches, too-many-locals, too-many-lines
# Standard Imports
from __future__ import unicode_literals
import argparse
import configparser
import json
import logging
import math
import os
import platform
import socket
import sys
import time
from datetime import datetime, timedelta
from subprocess import Popen
import dateutil.tz
# 3rd Party Imports
import linode
import pytz
import requests
import tweepy
from bs4 import BeautifulSoup
from PIL import Image, ImageDraw, ImageFont
# Discord imports
import asyncio
import discord
import time
import threading
import readline
# My Local / Custom Imports
import advanced_stats
import hockey_bot_imaging
import nhl_game_events
import other_game_info
# If running via Docker, there is no secret.py file
# Config is done via ENV variables - just pass through this error
try:
from secret import *
except ImportError:
pass
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Configuration, Logging & Argument Parsing
# ------------------------------------------------------------------------------
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
config = configparser.ConfigParser()
conf_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.ini')
config.read(conf_path)
TEAM_BOT = config['DEFAULT']['TEAM_NAME']
NHLAPI_BASEURL = config['ENDPOINTS']['NHL_BASE']
# TWITTER_URL = config['ENDPOINTS']['TWITTER_URL']
TWITTER_ID = config['ENDPOINTS']['TWITTER_HANDLE']
TWITTER_URL = f'https://twitter.com/{TWITTER_ID}/status/'
VPS_CLOUDHOST = config['VPS']['CLOUDHOST']
VPS_HOSTNAME = config['VPS']['HOSTNAME']
# Discord Configuration (Uncomment Top Line to Enable Debug Mode)
# CHANNEL_ID = config['DISCORD']['DEBUG_CHANNEL_ID']
CHANNEL_ID = config['DISCORD']['CHANNEL_ID']
message_queue = asyncio.Queue()
def setup_logging():
"""
Configures application logging and prints the first three log lines.
Input:
None
Output:
None
"""
#pylint: disable=line-too-long
# logger = logging.getLogger(__name__)
log_file_name = datetime.now().strftime(config['SCRIPT']['LOG_FILE_NAME'] + '-%Y%m%d%H%M%s.log')
log_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logs', log_file_name)
if args.console and args.debug:
logging.basicConfig(level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(module)s.%(funcName)s (%(lineno)d) - %(levelname)s - %(message)s')
elif args.console:
logging.basicConfig(level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(module)s.%(funcName)s - %(levelname)s - %(message)s')
else:
logging.basicConfig(filename=log_file, level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def parse_arguments():
"""
Parses arguments passed into the Python script on the command line.
Input:
None
Output:
args - argument Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument("--notweets", help="log tweets to console instead of Twitter",
action="store_true")
parser.add_argument("--console", help="log to console instead of file",
action="store_true")
parser.add_argument("--debug", help="print debug log items",
action="store_true")
parser.add_argument("--team", help="override team in configuration",
action="store")
parser.add_argument("--debugtweets", help="send tweets from debug account",
action="store_true")
parser.add_argument("--localdata", help="use local data instead of API",
action="store_true")
parser.add_argument("--overridelines", help="override lines if None are returned",
action="store_true")
parser.add_argument("--yesterday", help="get yesterday game on the schedule",
action="store_true")
parser.add_argument("--date", help="override game date",
action="store")
parser.add_argument("--split", help="split squad game index",
action="store_true")
parser.add_argument("--docker", help="running in a docker container",
action="store_true")
parser.add_argument("--discord", help="Send messages to discord channel",
action="store_true")
arguments = parser.parse_args()
return arguments
def parse_env_variables(args):
"""
For when running via Docker, parse Environment variables.
Environment variables replace command line arguments.
Input:
args - argument Namespace
Output:
None
"""
if "ARGS_NOTWEETS" in os.environ and os.environ['ARGS_NOTWEETS'] == "TRUE":
args.notweets = True
if "ARGS_DEBUG" in os.environ and os.environ['ARGS_DEBUG'] == "TRUE":
args.debug = True
if "ARGS_TEAM" in os.environ:
args.team = os.environ['ARGS_TEAM']
if "ARGS_DEBUGTWEETS" in os.environ and os.environ['ARGS_DEBUGTWEETS'] == "TRUE":
args.debugtweets = True
if "ARGS_DATE" in os.environ:
args.date = os.environ['ARGS_DATE']
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Linode Related Functions
# ------------------------------------------------------------------------------
def is_linode():
"""Checks if the python script is running on a Linode instance."""
hostname = socket.gethostname()
platform_release = platform.release()
if hostname == VPS_HOSTNAME or VPS_CLOUDHOST in platform_release:
logging.info("Script is running on a Cloud VPS - host detected!")
return True
logging.info("Script is not running on specified Cloud VPS host!")
return False
def linode_shutdown():
"""
Create a Linode client (via apikey) & shutdown Linode ID specified in config.
Input:
None
Output:
None
"""
logging.info("Linode (%s) shutdown requested.", linode_id_devils)
# Create the Linode client & initiate the Linode
client = linode.linode_client.LinodeClient(linode_apikey)
l = linode.objects.Linode(client, linode_id_devils)
# Request the Linode to shutdown
l.shutdown()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Discord Methods
# ------------------------------------------------------------------------------
def bot_thread(loop, bot, bot_token, message_queue, channel_id):
asyncio.set_event_loop(loop)
@bot.event
async def on_ready():
while True:
data = await message_queue.get()
if len(data) == 3: # No Image
logging.info('Discord Message w/o Image Detected - %s', data)
event = data[0]
message = data[1]
channel_id = data[2]
message = f'▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n{message}'
try:
await bot.send_message(bot.get_channel(channel_id), message)
except Exception as e:
logging.warning('Error sending Discord message - %s', e)
event.set()
elif len(data) == 4: # Image to Send
logging.info('Discord Message w/ Image Detected - %s', data)
event = data[0]
message = data[1]
image = data[2]
channel_id = data[3]
message = f'▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬\n{message}'
try:
await bot.send_file(bot.get_channel(channel_id), image, content=message)
except Exception as e:
logging.warning('Error sending Discord image & message - %s', e)
event.set()
bot.run(DISCORD_TOKEN, bot = bot_token)
def send_discord(channel_id, message, image=None):
event = threading.Event()
if image is None:
logging.info('Sending Discord Message (Channel %s) - %s', channel_id, message)
message_queue.put_nowait([event, message, channel_id])
else:
logging.info('Sending Discord Message w/ Image (Channel %s) - %s - %s', channel_id, message, image)
message_queue.put_nowait([event, message, image, channel_id])
event.wait()
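# send_discord() blocks on the threading.Event until the bot's on_ready loop
# (running on its own asyncio event loop) has taken the item off message_queue
# and called event.set(), so messages are dispatched one at a time.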
def start_discord_bot():
loop = asyncio.new_event_loop()
bot = discord.Client()
bot_token = True
thread = threading.Thread(target = bot_thread, args = (loop, bot, bot_token, message_queue, CHANNEL_ID), daemon = True)
thread.start()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Tweepy & Miscellaneous Methods
# ------------------------------------------------------------------------------
def get_api():
"""
Returns an Authorized session of the Tweepy API.
Input:
None
Output:
tweepy_session - authorized twitter session that can send a tweet.
"""
if args.debugtweets:
auth = tweepy.OAuthHandler(debug_consumer_key, debug_consumer_secret)
auth.set_access_token(debug_access_token, debug_access_secret)
else:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
tweepy_session = tweepy.API(auth)
return tweepy_session
def send_tweet(tweet_text, reply=None):
"""
Sends a tweet from the account returned from get_api method.
Input:
tweet_text - The text to send as a tweet (may contain URL at end to quote tweet)
Output:
last_tweet - A link to the last tweet sent (or search result if duplicate)
If duplicate cannot be found, returns base URL (also raises error)
"""
# pylint: disable=bare-except
# If the --notweets flag is passed, log tweets instead of Twitter
if args.notweets:
logging.info("%s", tweet_text)
return TWITTER_URL
try:
api = get_api()
tweet_length = len(tweet_text)
logging.debug("Tweet length - %s", tweet_length)
if tweet_length < 280:
if reply is None:
logging.debug("Plain tweet, no reply.")
status = api.update_status(status=tweet_text)
else:
tweet_text = "@{} {}".format(TWITTER_ID, tweet_text)
logging.debug("Reply to tweet %s - \n%s", reply, tweet_text)
status = api.update_status(tweet_text, in_reply_to_status_id=reply)
# Return a full link to the URL in case a quote-tweet is needed
tweet_id = status.id_str
else:
logging.warning("A tweet longer than 280 characters was detected.")
logging.warning("Tweet: %s", tweet_text)
tweet_id = TWITTER_URL
# tweet_array = []
# tweets_needed = math.ceil(tweet_length / 280)
# for i in range(tweets_needed):
# range_start = (i * 280)
# range_end = ((i+1) * 280)
# tweet_array.append(tweet_text[range_start:range_end])
return tweet_id
except tweepy.TweepError as tweep_error:
try:
error_code = tweep_error.api_code
if error_code == 187:
if "score" in tweet_text.lower():
logging.info(
"Duplicate status detected - search for duplicate tweet.")
results = api.search(q=tweet_text)
if results:
tweet_id = results[0].id_str
# last_tweet = "{}{}".format(TWITTER_URL, tweet_id)
# return last_tweet
return tweet_id
else:
logging.info(
"Duplicate status detected, but not a goal - no need to search.")
return False
else:
logging.error("Non-duplicate tweet error: %s", tweep_error)
return False
except:
logging.critical("%s", sys.exc_info()[0])
return False
# Returns the ordinal variant of a number
ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(math.floor(n/10)%10 != 1)*(n%10 < 4)*n%10::4])
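# e.g. ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(3) -> '3rd',
#      ordinal(4) -> '4th', ordinal(11) -> '11th', ordinal(21) -> '21st'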
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Event Object Related Methods
# ------------------------------------------------------------------------------
def update_object_attributes(json_feed, game):
"""
Takes in a JSON object of the livefeed API and updates relevant
object attributes.
"""
# logging.info("Updating game & team object attributes.")
linescore = json_feed["liveData"]["linescore"]
# Updated game related attributes
game.game_state = json_feed["gameData"]["status"]["abstractGameState"]
# Update period related attributes
if game.game_state != "Preview":
game.period.current = linescore["currentPeriod"]
game.period.current_ordinal = linescore["currentPeriodOrdinal"]
game.period.time_remaining = linescore["currentPeriodTimeRemaining"]
game.period.intermission = linescore["intermissionInfo"]["inIntermission"]
# Update team related attributes
linescore_home = linescore["teams"]["home"]
linescore_away = linescore["teams"]["away"]
game.home_team.score = linescore_home["goals"]
game.home_team.shots = linescore_home["shotsOnGoal"]
# game.home_team.goalie_pulled = linescore_home["goaliePulled"]
game.away_team.score = linescore_away["goals"]
game.away_team.shots = linescore_away["shotsOnGoal"]
# game.away_team.goalie_pulled = linescore_away["goaliePulled"]
try:
all_plays = json_feed["liveData"]["plays"]["allPlays"]
last_event = all_plays[game.last_event_idx]
last_event_type = last_event["result"]["eventTypeId"]
event_filter_list = ["GOAL", "PENALTY"]
# Logic for tracking if a team kills a penalty
last_power_player_strength = game.power_play_strength
last_home_power_play = game.home_team.power_play
last_home_skaters = game.home_team.skaters
last_away_power_play = game.away_team.power_play
last_away_skaters = game.away_team.skaters
game.power_play_strength = json_feed["liveData"]["linescore"]["powerPlayStrength"]
game.home_team.power_play = linescore_home["powerPlay"]
game.home_team.skaters = linescore_home["numSkaters"]
game.away_team.power_play = linescore_away["powerPlay"]
game.away_team.skaters = linescore_away["numSkaters"]
# TODO: Track onIce Players Array?
preferred_homeaway = game.preferred_team.home_away
other_homeaway = game.other_team.home_away
on_ice_pref = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["onIce"]
on_ice_other = json_feed["liveData"]["boxscore"]["teams"][other_homeaway]["onIce"]
logging.info("Current Away Skaters: %s | Current Home Skaters: %s",
game.away_team.skaters, game.home_team.skaters)
logging.info("Current Power Play Strength: %s", game.power_play_strength)
logging.info("Preferred On Ice (%s): %s", len(on_ice_pref), on_ice_pref)
logging.info("Other On Ice (%s): %s\n", len(on_ice_other), on_ice_other)
# These conditions happen if one of the teams was
# previously on a power play, but aren't anymore
if last_home_power_play and not game.home_team.power_play:
logging.info("PP Strength Change - Home team was on a power play, but now aren't anymore.")
pk_team = game.home_team
pk_linescore = linescore_home
game.penalty_killed_flag = True
elif last_away_power_play and not game.away_team.power_play:
logging.info("PP Strength Change - Away team was on a power play, but now aren't anymore.")
pk_team = game.away_team
pk_linescore = linescore_away
game.penalty_killed_flag = True
elif last_home_skaters == 3 and game.home_team.skaters != 3:
logging.info("Num Skaters Change - Home team MIGHT be coming off a 5-on-3.")
pk_team = game.home_team
pk_linescore = linescore_home
game.penalty_killed_flag = True
elif last_away_skaters == 3 and game.away_team.skaters != 3:
logging.info("Num Skaters Change - Away team MIGHT be coming off a 5-on-3.")
pk_team = game.away_team
pk_linescore = linescore_away
game.penalty_killed_flag = True
if game.penalty_killed_flag and last_event_type not in event_filter_list:
logging.info("Last event was not a goal or penalty and skater number changed.")
logging.info("Previous Home Skaters: %s | Current Home Skaters: %s",
last_home_skaters, game.home_team.skaters)
logging.info("Previous Away Skaters: %s | Current Away Skaters: %s",
last_away_skaters, game.away_team.skaters)
logging.info('%s kill off a penalty with %s remaining in the %s period!',
pk_team.short_name, linescore['currentPeriodTimeRemaining'],
linescore['currentPeriodOrdinal'])
game.penalty_killed_flag = False
except Exception as e:
game.penalty_killed_flag = False
logging.warning("Issue checking if power play strength changed.")
logging.warning(e)
# Logic for keeping goalie pulled with events in between
try:
all_plays = json_feed["liveData"]["plays"]["allPlays"]
last_event = all_plays[game.last_event_idx]
last_event_type = last_event["result"]["eventTypeId"]
event_filter_list = ["GOAL", "PENALTY"]
# Get current values
home_goalie_pulled = game.home_team.goalie_pulled
away_goalie_pulled = game.away_team.goalie_pulled
if not game.home_team.goalie_pulled:
logging.debug("Home goalie in net - check and update attribute.")
home_goalie_pulled = game.home_team.goalie_pulled_setter(linescore_home["goaliePulled"])
elif game.home_team.goalie_pulled and last_event_type in event_filter_list:
logging.info("Home goalie was pulled and an important event detected - update.")
home_goalie_pulled = game.home_team.goalie_pulled_setter(linescore_home["goaliePulled"])
else:
logging.info("Home goalie is pulled and a non-important event detected, don't update.")
return
if not game.away_team.goalie_pulled:
logging.debug("Away goalie in net - check and update attribute.")
away_goalie_pulled = game.away_team.goalie_pulled_setter(linescore_away["goaliePulled"])
elif game.away_team.goalie_pulled and last_event_type in event_filter_list:
logging.info("Away goalie was pulled and an important event detected - update.")
away_goalie_pulled = game.away_team.goalie_pulled_setter(linescore_away["goaliePulled"])
else:
logging.info("Away goalie is pulled and a non-important event detected, don't update.")
return
# Calls the goalie_pulled function if the goalie has been pulled
if home_goalie_pulled:
goalie_pull_tweet(game, "home")
elif away_goalie_pulled:
goalie_pull_tweet(game, "away")
except IndexError:
logging.warning("Tried to update goalie pulled attribute, but index error - try again.")
def recent_event(event):
"""Determines if an event has happened recently enough. Used to not send old tweets.
Args:
event (dict): A dictionary of the event to check.
Returns:
bool: True if the event happened within the past minute, False if not.
"""
if args.yesterday:
return True
event_type = event["result"]["eventTypeId"]
event_idx = event["about"]["eventIdx"]
event_datetime = event["about"]["dateTime"]
now = datetime.now()
localtz = dateutil.tz.tzlocal()
localoffset = localtz.utcoffset(datetime.now(localtz))
date_time_obj = datetime.strptime(event_datetime, '%Y-%m-%dT%H:%M:%SZ')
date_time_local = date_time_obj + localoffset
seconds_since_event = int((now - date_time_local).total_seconds())
logging.info("Event #%s (%s) occurred %s second(s) in the past - if greater than 120, skip.",
event_idx, event_type, seconds_since_event)
return bool(seconds_since_event < int(config['SCRIPT']['EVENT_TIMEOUT']))
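# Worked example of the timestamp math above (illustrative values): with a local UTC offset of
# -5 hours, an event stamped "2019-01-01T01:30:00Z" converts to 2018-12-31 20:30:00 local time.
# If the local clock then reads 20:31:00, the event is 60 seconds old and passes an
# EVENT_TIMEOUT of, say, 120 seconds.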
def show_all_objects():
"""Outputs all relevant game objects to console."""
# (preferred_team, other_team) = nhl_game_events.preferred_teams(home_team_obj, away_team_obj)
preferred_team = game_obj.preferred_team
print("** Game Attributes **")
print(game_obj)
for k, v in vars(game_obj).items():
print("{}: {}".format(k, v))
print(game_obj.game_time_local)
print(game_obj.game_time_countdown)
print(game_obj.game_hashtag)
print(game_obj.live_feed)
print("Preferred Team TV Channel: {}".format(preferred_team.tv_channel))
print("\n** Home Team Attributes **")
print(home_team_obj)
for k, v in vars(home_team_obj).items():
print("{}: {}".format(k, v))
print("\n** Away Team Attributes **")
print(away_team_obj)
for k, v in vars(away_team_obj).items():
print("{}: {}".format(k, v))
print("\n** Period Attributes **")
print(game_obj.period)
for k, v in vars(game_obj.period).items():
print("{}: {}".format(k, v))
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Image Generation Functions
# ------------------------------------------------------------------------------
def luminance(pixel):
return (0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2])
def are_colors_similar(color_a, color_b):
return abs(luminance(color_a) - luminance(color_b)) < 18
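# Illustrative note: luminance() uses the ITU-R BT.601 weights (0.299*R + 0.587*G + 0.114*B), so
# are_colors_similar() treats two colors as similar when their perceived brightness differs by
# fewer than 18 points on the 0-255 scale. For example (hypothetical RGB values):
#   luminance((200, 30, 30)) ~ 80.8 vs luminance((30, 30, 200)) ~ 49.4 -> not similar (diff ~31)
#   luminance((200, 30, 30)) ~ 80.8 vs luminance((180, 60, 40)) ~ 93.6 -> similar (diff ~13)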
def custom_font_size(fontName, size):
return ImageFont.truetype(fontName, size)
def pregame_image(game):
if not args.notweets:
# Check if the preview tweet has been sent already
api = get_api()
search_text = "{} tune".format(TWITTER_ID)
search_results = api.search(q=search_text, count=1)
if len(search_results) > 0:
logging.info("Found an old tune-in tweet - checking if sent today.")
latest_tweet_date = search_results[0].created_at
# If preview tweet was sent today, return False and skip this section
logging.info("Previous tune in tweet - %s", latest_tweet_date)
if latest_tweet_date.date() == datetime.now().date():
return None
# Load Required Fonts
FONT_OPENSANS_BOLD = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Bold.ttf')
FONT_OPENSANS_SEMIBOLD = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-SemiBold.ttf')
FONT_OPENSANS_EXTRABOLD = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-ExtraBold.ttf')
FONT_COLOR_WHITE = (255, 255, 255)
# Set the background / load the baseline image
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayPregameFinalV3-Larger.png'))
draw = ImageDraw.Draw(bg)
# draw.fontmode = "0"
# Setup Colors (via functions)
pref_colors = nhl_game_events.team_colors(game.preferred_team.team_name)
pref_color_primary = pref_colors["primary"]["bg"]
other_colors = nhl_game_events.team_colors(game.other_team.team_name)
logging.debug("Pref Colors - %s // Other Colors - %s", pref_colors, other_colors)
# Setup static coordinates / width / etc
LOGO_WIDTH = 300
COORDS_HOME_LOGO = (325, 270)
COORDS_AWAY_LOGO = (0, 270)
COORDS_RECORDS_Y = 490
COORDS_GAME_NUM = (845, 100)
COORDS_GAMEINFO_DAY_X = 648
COORDS_GAMEINFO_DAY_Y = 290
COORDS_GAMEINFO_DAY = (COORDS_GAMEINFO_DAY_X, COORDS_GAMEINFO_DAY_Y)
COORDS_GAMEINFO_WIDTH = 520
MAX_GAMEINFO_WIDTH = COORDS_GAMEINFO_WIDTH - 10
COORDS_GAMEINFO_LINE2_RECT_TOPLEFT = (648, 381)
COORDS_GAMEINFO_LINE2_RECT_BOTRIGHT = (1168, 451)
COORDS_GAMEINFO_LINE3_RECT_TOPLEFT = (648, 451)
COORDS_GAMEINFO_LINE3_RECT_BOTRIGHT = (1168, 521)
# Load, resize & paste team logos
away_logo = Image.open(os.path.join(PROJECT_ROOT,f"resources/logos/{game.away_team.team_name.replace(' ', '')}.png"))
home_logo = Image.open(os.path.join(PROJECT_ROOT,f"resources/logos/{game.home_team.team_name.replace(' ', '')}.png"))
resize = (300, 200)
away_logo.thumbnail(resize, Image.ANTIALIAS)
home_logo.thumbnail(resize, Image.ANTIALIAS)
bg.paste(away_logo, COORDS_AWAY_LOGO, away_logo)
bg.paste(home_logo, COORDS_HOME_LOGO, home_logo)
# Home Points, Record & Draw
home_pts = game.home_team.points
home_record_str = f"{home_pts} PTS • {game.home_team.current_record}"
home_w, _ = draw.textsize(home_record_str, custom_font_size(FONT_OPENSANS_BOLD, 35))
coords_home_record = (((2 * LOGO_WIDTH + 325 - home_w) / 2), COORDS_RECORDS_Y)
draw.text(coords_home_record, home_record_str, fill=FONT_COLOR_WHITE, font=custom_font_size(FONT_OPENSANS_BOLD, 35))
away_pts = game.away_team.points
away_record_str = f"{away_pts} PTS • {game.away_team.current_record}"
away_w, _ = draw.textsize(away_record_str, custom_font_size(FONT_OPENSANS_BOLD, 35))
coords_away_record = (((LOGO_WIDTH - away_w) / 2), COORDS_RECORDS_Y)
draw.text(coords_away_record, away_record_str, fill=FONT_COLOR_WHITE, font=custom_font_size(FONT_OPENSANS_BOLD, 35))
## TODO: Add logic for pre-season & playoffs here.
game_number_str = f"{game.preferred_team.games + 1} OF 82"
draw.text(COORDS_GAME_NUM, game_number_str, fill=pref_color_primary, font=custom_font_size(FONT_OPENSANS_EXTRABOLD, 80))
# Draw background rectangles for Game Info lines 2 & 3
draw.rectangle([COORDS_GAMEINFO_LINE2_RECT_TOPLEFT, COORDS_GAMEINFO_LINE2_RECT_BOTRIGHT], pref_color_primary)
draw.rectangle([COORDS_GAMEINFO_LINE3_RECT_TOPLEFT, COORDS_GAMEINFO_LINE3_RECT_BOTRIGHT], FONT_COLOR_WHITE)
# Build Day / Date Line
line1_chars = len(game.day_of_game_local + game.month_day_local)
line1_fontsize = int((COORDS_GAMEINFO_WIDTH / line1_chars) + 20)
gameinfo_day = game.day_of_game_local.upper()
day_w, day_h = draw.textsize(gameinfo_day, font=custom_font_size(FONT_OPENSANS_EXTRABOLD, line1_fontsize))
draw.text(COORDS_GAMEINFO_DAY, gameinfo_day, fill=pref_color_primary, font=custom_font_size(FONT_OPENSANS_EXTRABOLD, line1_fontsize))
gameinfo_date = game.month_day_local.upper()
date_w, date_h = draw.textsize(gameinfo_date, font=custom_font_size(FONT_OPENSANS_SEMIBOLD, line1_fontsize))
coords_gameinfo_date = (COORDS_GAMEINFO_DAY_X + (COORDS_GAMEINFO_WIDTH - date_w), COORDS_GAMEINFO_DAY_Y)
draw.text(coords_gameinfo_date, gameinfo_date, fill=pref_color_primary, font=custom_font_size(FONT_OPENSANS_SEMIBOLD, line1_fontsize))
# Build Game Info Line 2 (Time & Venue)
gameinfo_venue = game.venue
gameinfo_time = game.game_time_local.lstrip("0")
gameinfo_line2 = f"{gameinfo_time} • {gameinfo_venue}"
line2_w, line2_h = draw.textsize(gameinfo_line2, font=custom_font_size(FONT_OPENSANS_BOLD, 38))
if line2_w > MAX_GAMEINFO_WIDTH:
logging.info("Line 2 was too long, reducing font size.")
line2_w, line2_h = draw.textsize(gameinfo_line2, font=custom_font_size(FONT_OPENSANS_BOLD, 31))
coords_line2 = (COORDS_GAMEINFO_DAY_X + ((COORDS_GAMEINFO_WIDTH - line2_w) / 2), 390)
draw.text(coords_line2, gameinfo_line2, FONT_COLOR_WHITE, font=custom_font_size(FONT_OPENSANS_BOLD, 31))
else:
coords_line2 = (COORDS_GAMEINFO_DAY_X + ((COORDS_GAMEINFO_WIDTH - line2_w) / 2), 387)
draw.text(coords_line2, gameinfo_line2, FONT_COLOR_WHITE, font=custom_font_size(FONT_OPENSANS_BOLD, 38))
# Build Game Info Line 3 (Game Hashtag & Pref Team Hashtag)
gameinfo_hashtag = game.game_hashtag
gameinfo_teamhashtag = nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type)
gameinfo_line3 = f"{gameinfo_teamhashtag} • {gameinfo_hashtag}"
line3_w, line3_h = draw.textsize(gameinfo_line3, font=custom_font_size(FONT_OPENSANS_BOLD, 38))
if line3_w > MAX_GAMEINFO_WIDTH:
logging.info("Line 3 was too long, reducing font size.")
line3_w, line3_h = draw.textsize(gameinfo_line3, font=custom_font_size(FONT_OPENSANS_BOLD, 33))
coords_line3 = (COORDS_GAMEINFO_DAY_X + ((COORDS_GAMEINFO_WIDTH - line3_w) / 2), 460)
draw.text(coords_line3, gameinfo_line3, pref_color_primary, font=custom_font_size(FONT_OPENSANS_BOLD, 33))
else:
coords_line3 = (COORDS_GAMEINFO_DAY_X + ((COORDS_GAMEINFO_WIDTH - line3_w) / 2), 457)
draw.text(coords_line3, gameinfo_line3, pref_color_primary, font=custom_font_size(FONT_OPENSANS_BOLD, 38))
return bg
def preview_image(game):
"""Generates the game preview image and returns the image instance.
Args:
game (Game): The current game instance.
Returns:
img (Image): Image object of game preview.
None: Can return None if tweet already sent
"""
# Check if the preview tweet has been sent already
api = get_api()
search_text = "{} tune".format(TWITTER_ID)
search_results = api.search(q=search_text, count=1)
if len(search_results) > 0:
logging.info("Found an old tune-in tweet - checking if sent today.")
latest_tweet_date = search_results[0].created_at
# If preview tweet was sent today, return False and skip this section
logging.info("Previous tune in tweet - %s", latest_tweet_date)
if latest_tweet_date.date() == datetime.now().date():
return None
# Load required fonts & background image
teams_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Adidas.otf')
details_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Impact.ttf')
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayBlank.jpg'))
font_black = (0, 0, 0)
# Create & format text for pre-game image
teams_text = "{} vs {}".format(game.away_team.short_name, game.home_team.short_name)
game_date_short = game.game_date_short
game_time = game.game_time_local.replace(" ", "")
if game.game_type == config['GAMETYPE']['PLAYOFFS']:
series_details = ("{} - {} / {} - {}"
.format(game.away_team.short_name, game.away_team.wins,
game.home_team.short_name, game.home_team.wins))
# Convert round number into text
if game.game_id_playoff_round == "1":
playoff_round_text = "First Round"
elif game.game_id_playoff_round == "2":
playoff_round_text = "Second Round"
else:
playoff_round_text = "Unknown Round"
full_details = ("{} - Game #{}\n{}\n\n{} | {} | {}\n#StanleyCup {} {}"
.format(playoff_round_text, game.game_id_playoff_game,
series_details, game.venue, game_date_short, game_time,
nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type),
game.game_hashtag))
details_coords = (110, 110)
elif game.game_type == config['GAMETYPE']['PRESEASON']:
details_game = ("PRESEASON | {} | {}"
.format(game_date_short, game_time))
full_details = "{}\n{}\n{}".format(details_game, game.venue, game.game_hashtag)
details_coords = (145, 160)
else:
details_game = ("{} of 82 | {} | {}"
.format(game.preferred_team.games + 1, game_date_short, game_time))
full_details = "{}\n{}\n{}".format(details_game, game.venue, game.game_hashtag)
details_coords = (145, 160)
# Calculate Font Sizes
teams_length = len(teams_text)
teams_font_size = math.floor(1440 / teams_length)
longest_details = 0
for line in iter(full_details.splitlines()):
longest_details = len(line) if len(line) > longest_details else longest_details
details_font_size = math.floor(1100 / longest_details)
font_large = ImageFont.truetype(teams_font, teams_font_size)
font_small = ImageFont.truetype(details_font, details_font_size)
draw = ImageDraw.Draw(bg)
team_coords = (40, 20)
draw.text(team_coords, teams_text, font_black, font_large)
draw.multiline_text(details_coords, full_details, font_black, font_small, None, 10, "center")
return bg
def final_image(game, boxscore_preferred, boxscore_other):
"""Generates the final boxscore image to send in the GAME_END tweet.
Args:
game (Game): The current game instance.
boxscore_preferred (dict): The boxscore JSON dictionary of preferred team.
boxscore_other (dict): The boxscore JSON dictionary of other team.
Returns:
Image: Image object (from PIL library) to be sent to Twitter.
"""
teams_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Adidas.otf')
details_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Impact.ttf')
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayFinalPrudentialBlank.jpg'))
# Get Game Info for Updated Record
_, schedule_json = is_game_today(get_team(TEAM_BOT))
if game.home_team.preferred:
pref = schedule_json["teams"]["home"]
other = schedule_json["teams"]["away"]
else:
pref = schedule_json["teams"]["away"]
other = schedule_json["teams"]["home"]
# Load & Resize Logos
pref_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.preferred_team.team_name.replace(" ", ""))))
other_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.other_team.team_name.replace(" ", ""))))
resize = (125, 125)
pref_logo.thumbnail(resize, Image.ANTIALIAS)
other_logo.thumbnail(resize, Image.ANTIALIAS)
font_large = ImageFont.truetype(teams_font, 80)
font_small = ImageFont.truetype(details_font, 40)
font_smaller = ImageFont.truetype(details_font, 20)
font_black = (0, 0, 0)
# Setup Coordinates
coords_pref_score = (241, 238)
coords_pref_logo = (279, 240)
coords_pref_record = (270, 328)
coords_other_score = (703, 238)
coords_other_logo = (584, 240)
coords_other_record = (648, 328)
coords_shots = (242, 439)
coords_pk = (465, 439)
coords_pp = (676, 439)
coords_faceoff = (215, 520)
coords_hits = (478, 520)
coords_blocks = (693, 520)
# Setup Text Elements
preferred_team = game.preferred_team
other_team = game.other_team
preferred_stats = boxscore_preferred["teamStats"]["teamSkaterStats"]
other_stats = boxscore_other["teamStats"]["teamSkaterStats"]
preferred_stats_faceoff_percent = float(preferred_stats["faceOffWinPercentage"])
preferred_stats_hits = preferred_stats["hits"]
preferred_stats_ppg = int(preferred_stats["powerPlayGoals"])
preferred_stats_pp = int(preferred_stats["powerPlayOpportunities"])
preferred_stats_blocked = preferred_stats["blocked"]
preferred_stats_pk_against = int(other_stats["powerPlayOpportunities"])
preferred_stats_pk_killed = preferred_stats_pk_against - int(other_stats["powerPlayGoals"])
# Score & Record
text_pref_score = game.preferred_team.score
text_other_score = game.other_team.score
# Update records & get new for final image (Playoffs)
if game.game_type == "P":
if game.preferred_team.score > game.other_team.score:
pref_outcome = "win"
other_outcome = "loss"
else:
other_outcome = "win"
pref_outcome = "loss"
pref_record_str = preferred_team.get_new_playoff_series(pref_outcome)
other_record_str = other_team.get_new_playoff_series(other_outcome)
else:
if game.preferred_team.score > game.other_team.score:
pref_outcome = "win"
other_outcome = "loss" if game.period.current < 4 else "ot"
else:
other_outcome = "win"
pref_outcome = "loss" if game.period.current < 4 else "ot"
pref_record_str = preferred_team.get_new_record(pref_outcome)
other_record_str = other_team.get_new_record(other_outcome)
text_shots = preferred_team.shots
text_pk = "{} / {}".format(preferred_stats_pk_killed, preferred_stats_pk_against)
text_pp = "{} / {}".format(preferred_stats_ppg, preferred_stats_pp)
text_faceoff = "{}%".format(preferred_stats_faceoff_percent)
text_hits = preferred_stats_hits
text_blocks = preferred_stats_blocked
bg.paste(pref_logo, coords_pref_logo, pref_logo)
bg.paste(other_logo, coords_other_logo, other_logo)
draw = ImageDraw.Draw(bg)
draw.text(coords_pref_score, str(text_pref_score), font_black, font_large)
draw.text(coords_other_score, str(text_other_score), font_black, font_large)
draw.text(coords_pref_record, pref_record_str, font_black, font_smaller)
draw.text(coords_other_record, other_record_str, font_black, font_smaller)
draw.text(coords_shots, str(text_shots), font_black, font_small)
draw.text(coords_pk, str(text_pk), font_black, font_small)
draw.text(coords_pp, str(text_pp), font_black, font_small)
draw.text(coords_faceoff, str(text_faceoff), font_black, font_small)
draw.text(coords_hits, str(text_hits), font_black, font_small)
draw.text(coords_blocks, str(text_blocks), font_black, font_small)
return bg
def stats_image_bar_generator(draw, stat, pref_stat_value, other_stat_value,
pref_colors, other_colors):
logging.debug("Stats Bar Generator: stat - %s, pref_value - %s, other_value - %s, pref_colors - %s, other_colors - %s",
stat, pref_stat_value, other_stat_value, pref_colors, other_colors)
# Load all fonts to be used within the image generator
font_opensans_regular = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Regular.ttf')
font_opensans_italic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Italic.ttf')
font_opensans_bold = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Bold.ttf')
font_opensans_bolditalic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-BoldItalic.ttf')
# Static Font Sizes
font_opensans_regular_large = ImageFont.truetype(font_opensans_regular, 80)
font_opensans_regular_small = ImageFont.truetype(font_opensans_regular, 40)
font_opensans_regular_smaller = ImageFont.truetype(font_opensans_regular, 30)
font_opensans_regular_xxs = ImageFont.truetype(font_opensans_regular, 20)
font_opensans_italic_xs = ImageFont.truetype(font_opensans_italic, 25)
font_opensans_italic_xxs = ImageFont.truetype(font_opensans_italic, 20)
font_opensans_bold_large = ImageFont.truetype(font_opensans_bold, 90)
font_opensans_bold_small = ImageFont.truetype(font_opensans_bold, 40)
font_opensans_bold_smaller = ImageFont.truetype(font_opensans_bold, 30)
font_opensans_bold_xs = ImageFont.truetype(font_opensans_bold, 27)
font_opensans_boldit_small = ImageFont.truetype(font_opensans_bolditalic, 40)
font_opensans_boldit_smallish = ImageFont.truetype(font_opensans_bolditalic, 35)
font_opensans_boldit_smaller = ImageFont.truetype(font_opensans_bolditalic, 30)
font_opensans_boldit_xs = ImageFont.truetype(font_opensans_bolditalic, 25)
font_opensans_boldit_xxs = ImageFont.truetype(font_opensans_bolditalic, 20)
# Define static values, text strings & coordinates
STATS_RECT_WIDTH = 437
STATS_RECT_TOPLEFT_X = 279
STATS_RECT_HEIGHT = 49
FONT_BLACK = (0, 0, 0)
FONT_WHITE = (255, 255, 255)
# Check stat type and set specific parameters here
if stat == "shots":
stat_total = pref_stat_value + other_stat_value
stat_total_text = f"SHOTS: {stat_total}"
stat_total_text_coords = (50, 243)
stat_total_text_font = font_opensans_boldit_smaller
stat_rect_pref_topleft_y = 241
elif stat == "blocked shots":
stat_total = pref_stat_value + other_stat_value
stat_total_text = f"BLOCKED SHOTS: {stat_total}"
stat_total_text_font = custom_font_size(font_opensans_bolditalic, 23)
stat_total_text_coords = (50, 335)
stat_rect_pref_topleft_y = 328
elif stat == "hits":
stat_total = pref_stat_value + other_stat_value
stat_total_text = f"HITS: {stat_total}"
stat_total_text_font = font_opensans_boldit_smaller
stat_total_text_coords = (50, 510)
stat_rect_pref_topleft_y = 505
elif stat == "power play":
pref_powerplays, pref_ppg = pref_stat_value
other_powerplays, other_ppg = other_stat_value
power_play_pref = f"{int(pref_ppg)} / {int(pref_powerplays)}"
power_play_other = f"{int(other_ppg)} / {int(other_powerplays)}"
# Re-assign values
pref_stat_value = pref_powerplays
other_stat_value = other_powerplays
stat_total = pref_powerplays + other_powerplays
stat_total_text = f"POWER PLAYS: {int(stat_total)}"
stat_total_text_font = custom_font_size(font_opensans_bolditalic, 23)
stat_total_text_coords = (50, 423)
stat_rect_pref_topleft_y = 416
elif stat == "penalty minutes":
stat_total = pref_stat_value + other_stat_value
stat_total_text = f"PENALTY MINUTES: {stat_total}"
stat_total_text_font = custom_font_size(font_opensans_bolditalic, 20)
stat_total_text_coords = (50, 603)
stat_rect_pref_topleft_y = 592
# Calculate the remainder of the coordinates
stat_rect_width_pref = STATS_RECT_WIDTH * (pref_stat_value / stat_total)
stat_rect_width_other = STATS_RECT_WIDTH * (other_stat_value / stat_total)
stat_rect_pref_topleft_x = STATS_RECT_TOPLEFT_X
stat_rect_pref_bottomright_x = stat_rect_pref_topleft_x + stat_rect_width_pref
stat_rect_pref_bottomright_y = stat_rect_pref_topleft_y + STATS_RECT_HEIGHT
stat_text_pref_coords = (stat_rect_pref_topleft_x + 10, stat_rect_pref_topleft_y + 6)
stat_rect_other_topleft_x = stat_rect_pref_bottomright_x
stat_rect_other_topleft_y = stat_rect_pref_topleft_y
stat_rect_other_bottomright_x = stat_rect_other_topleft_x + stat_rect_width_other
stat_rect_other_bottomright_y = stat_rect_pref_bottomright_y
stat_text_other_coords = (stat_rect_other_topleft_x + 10, stat_rect_other_topleft_y + 6)
# Draw the text fields & bars
if stat == "power play":
draw.rectangle([stat_rect_pref_topleft_x, stat_rect_pref_topleft_y, stat_rect_pref_bottomright_x,
stat_rect_pref_bottomright_y], outline=None, fill=pref_colors["bg"])
draw.rectangle([stat_rect_other_topleft_x, stat_rect_other_topleft_y, stat_rect_other_bottomright_x,
stat_rect_other_bottomright_y], outline=None, fill=other_colors["bg"])
if pref_powerplays != 0:
draw.text(stat_text_pref_coords, power_play_pref, pref_colors["text"], font_opensans_bold_xs)
if other_powerplays != 0:
draw.text(stat_text_other_coords, power_play_other, other_colors["text"], font_opensans_bold_xs)
draw.text(stat_total_text_coords, stat_total_text, FONT_WHITE, stat_total_text_font)
else:
draw.rectangle([stat_rect_pref_topleft_x, stat_rect_pref_topleft_y, stat_rect_pref_bottomright_x,
stat_rect_pref_bottomright_y], outline=None, fill=pref_colors["bg"])
draw.rectangle([stat_rect_other_topleft_x, stat_rect_other_topleft_y, stat_rect_other_bottomright_x,
stat_rect_other_bottomright_y], outline=None, fill=other_colors["bg"])
draw.text(stat_text_pref_coords, str(pref_stat_value), pref_colors["text"], font_opensans_bold_xs)
draw.text(stat_text_other_coords, str(other_stat_value), other_colors["text"], font_opensans_bold_xs)
draw.text(stat_total_text_coords, stat_total_text, FONT_WHITE, stat_total_text_font)
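# Illustrative note on the bar math above (hypothetical values): each stat bar splits the 437px
# stats rectangle proportionally between the two teams. With 30 preferred shots and 20 other
# shots, the preferred bar is 437 * (30 / 50) = 262.2px wide and the other bar fills the
# remaining 174.8px, starting at the x-coordinate where the preferred bar ends.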
def stats_image_generator(game, bg_type, boxscore_preferred, boxscore_other):
logging.debug("Stats Image Generator Game: %s", game)
logging.debug("Stats Image Generator BG: %s", bg_type)
# logging.debug("Stats Image Generator BOXPREF: %s", boxscore_preferred)
# logging.debug("Stats Image Generator BOXOTHER: %s", boxscore_other)
# Define static values, text strings & coordinates
STATS_RECT_WIDTH = 437
STATS_RECT_TOPLEFT_X = 279
STATS_RECT_HEIGHT = 49
FONT_BLACK = (0, 0, 0)
FONT_WHITE = (255, 255, 255)
COORDS_PREF_LOGO = (840, 120)
COORDS_OTHER_LOGO = (1015, 120)
COORDS_PREF_RECORD = (910, 135)
COORDS_OTHER_RECORD = (1110, 135)
COORDS_LOGO_VS = (960, 130)
COORDS_TEAMS_VS_Y = 198
COORDS_TEAMS_VS_X = 275
WIDTH_TEAMS_VS = 447
COORDS_TEAMS_VS = (335, 198)
TEAMS_VS_W, TEAMS_VS_H = (447, 39)
# Load & Resize Logos
pref_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.preferred_team.team_name.replace(" ", ""))))
other_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.other_team.team_name.replace(" ", ""))))
resize = (120, 120)
pref_logo.thumbnail(resize, Image.ANTIALIAS)
other_logo.thumbnail(resize, Image.ANTIALIAS)
# Change background image based on intermission or game final
# Also change the "losing team" image to grayscale for final
if bg_type == "intermission":
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayIntermissionFinal-V3Larger.png'))
bg.paste(pref_logo, COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo, COORDS_OTHER_LOGO, other_logo)
else:
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayRecapFinalV3-Larger.png'))
COORDS_PREF_LOGO = (780, 120)
COORDS_OTHER_LOGO = (985, 120)
COORDS_LOGO_VS = (-100, -100)
if game.preferred_team.score > game.other_team.score:
bg.paste(pref_logo, COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo.convert('LA'), COORDS_OTHER_LOGO, other_logo)
else:
bg.paste(pref_logo.convert('LA'), COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo, COORDS_OTHER_LOGO, other_logo)
# Load all fonts to be used within the image generator
teams_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Adidas.otf')
details_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Impact.ttf')
font_opensans_regular = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Regular.ttf')
font_opensans_italic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Italic.ttf')
font_opensans_bold = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Bold.ttf')
font_opensans_bolditalic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-BoldItalic.ttf')
# Static Font Sizes
font_opensans_regular_large = ImageFont.truetype(font_opensans_regular, 80)
font_opensans_regular_small = ImageFont.truetype(font_opensans_regular, 40)
font_opensans_regular_smaller = ImageFont.truetype(font_opensans_regular, 30)
font_opensans_regular_xxs = ImageFont.truetype(font_opensans_regular, 20)
font_opensans_italic_xs = ImageFont.truetype(font_opensans_italic, 25)
font_opensans_italic_xxs = ImageFont.truetype(font_opensans_italic, 20)
font_opensans_bold_large = ImageFont.truetype(font_opensans_bold, 90)
font_opensans_bold_small = ImageFont.truetype(font_opensans_bold, 40)
font_opensans_bold_smaller = ImageFont.truetype(font_opensans_bold, 30)
font_opensans_bold_xs = ImageFont.truetype(font_opensans_bold, 27)
font_opensans_boldit_small = ImageFont.truetype(font_opensans_bolditalic, 40)
font_opensans_boldit_smallish = ImageFont.truetype(font_opensans_bolditalic, 35)
font_opensans_boldit_smaller = ImageFont.truetype(font_opensans_bolditalic, 30)
font_opensans_boldit_xs = ImageFont.truetype(font_opensans_bolditalic, 25)
font_opensans_boldit_xxs = ImageFont.truetype(font_opensans_bolditalic, 20)
# Setup Colors (via functions)
pref_colors = nhl_game_events.team_colors(game.preferred_team.team_name)
other_colors = nhl_game_events.team_colors(game.other_team.team_name)
logging.debug("Pref Colors - %s // Other Colors - %s", pref_colors, other_colors)
if are_colors_similar(pref_colors["primary"]["bg"], other_colors["primary"]["bg"]):
logging.debug("Primary Colors are Similar!")
pref_colors_all = pref_colors["primary"]
pref_colors_bg = pref_colors["primary"]["bg"]
pref_colors_text = pref_colors["primary"]["text"]
other_colors_all = other_colors["secondary"]
other_colors_bg = other_colors["secondary"]["bg"]
other_colors_text = other_colors["secondary"]["text"]
else:
pref_colors_all = pref_colors["primary"]
pref_colors_bg = pref_colors["primary"]["bg"]
pref_colors_text = pref_colors["primary"]["text"]
other_colors_all = other_colors["primary"]
other_colors_bg = other_colors["primary"]["bg"]
other_colors_text = other_colors["primary"]["text"]
logging.debug("(After Similar) -- Pref Colors - %s // Other Colors - %s", pref_colors, other_colors)
# Draw the rest of the image
draw = ImageDraw.Draw(bg)
draw.fontmode = "0"
# Draw "VS" or Updated Record
if bg_type == "intermission":
draw.text(COORDS_LOGO_VS, "vs", FONT_WHITE, font_opensans_bold_small)
else:
# Update records & get new for final image (Playoffs)
if game.game_type == "P":
if game.preferred_team.score > game.other_team.score:
pref_outcome = "win"
other_outcome = "loss"
else:
other_outcome = "win"
pref_outcome = "loss"
pref_str = game.preferred_team.get_new_playoff_series(pref_outcome)
other_str = game.other_team.get_new_playoff_series(other_outcome)
else:
if game.preferred_team.score > game.other_team.score:
pref_outcome = "win"
other_outcome = "loss" if game.period.current < 4 else "ot"
else:
other_outcome = "win"
pref_outcome = "loss" if game.period.current < 4 else "ot"
pref_points_str = game.preferred_team.get_new_points(pref_outcome)
pref_record_str = game.preferred_team.get_new_record(pref_outcome)
other_points_str = game.other_team.get_new_points(other_outcome)
other_record_str = game.other_team.get_new_record(other_outcome)
pref_str = f"{pref_points_str} PTS\n{pref_record_str}"
other_str = f"{other_points_str} PTS\n{other_record_str}"
draw.text(COORDS_PREF_RECORD, pref_str, FONT_WHITE, custom_font_size(font_opensans_bold, 16), align="center")
draw.text(COORDS_OTHER_RECORD, other_str, FONT_WHITE, custom_font_size(font_opensans_bold, 16), align="center")
# Create Team Name String & Calculate Center
teams_vs_text = f"{game.preferred_team.short_name} vs. {game.other_team.short_name}".upper()
w, h = draw.textsize(teams_vs_text, font_opensans_bold_smaller)
if w < WIDTH_TEAMS_VS:
coords_teams_vs_calc = (COORDS_TEAMS_VS_X + ((TEAMS_VS_W - w) / 2), COORDS_TEAMS_VS_Y)
draw.text(coords_teams_vs_calc, teams_vs_text, FONT_BLACK, font_opensans_bold_smaller)
else:
w, h = draw.textsize(teams_vs_text, font_opensans_bold_xs)
coords_teams_vs_calc = (COORDS_TEAMS_VS_X + ((TEAMS_VS_W - w) / 2), COORDS_TEAMS_VS_Y)
draw.text(coords_teams_vs_calc, teams_vs_text, FONT_BLACK, font_opensans_bold_xs)
# Draw the stats bars
preferred_stats = boxscore_preferred["teamStats"]["teamSkaterStats"]
other_stats = boxscore_other["teamStats"]["teamSkaterStats"]
stats_image_bar_generator(draw, "shots", preferred_stats["shots"],
other_stats["shots"], pref_colors_all, other_colors_all)
stats_image_bar_generator(draw, "blocked shots", preferred_stats["blocked"],
other_stats["blocked"], pref_colors_all, other_colors_all)
stats_image_bar_generator(draw, "hits", preferred_stats["hits"],
other_stats["hits"], pref_colors_all, other_colors_all)
# Some games go through multiple periods without a single penalty being called.
# Checking here removes the `Divide by Zero` errors.
if (
preferred_stats["pim"] != 0 and
other_stats["pim"] != 0 and
preferred_stats["powerPlayOpportunities"] != 0 and
other_stats["powerPlayOpportunities"] != 0
):
stats_image_bar_generator(draw, "penalty minutes", preferred_stats["pim"],
other_stats["pim"], pref_colors_all, other_colors_all)
# Power Play requires a Tuple to be passed in (instead of an integer)
pref_powerplay = (preferred_stats["powerPlayOpportunities"], preferred_stats["powerPlayGoals"])
other_powerplay = (other_stats["powerPlayOpportunities"], other_stats["powerPlayGoals"])
logging.debug("Calling Stats Bar: pref_pp - %s, other_pp - %s, pref_colors - %s, other_colors - %s",
pref_powerplay, other_powerplay, pref_colors_all, other_colors_all)
stats_image_bar_generator(draw, "power play", pref_powerplay, other_powerplay,
pref_colors_all, other_colors_all)
else:
# If PIM & PP == 0, draw only the labels
penalty_total_text = "PENALTY MINUTES: 0"
penalty_total_text_font = custom_font_size(font_opensans_bolditalic, 20)
penalty_total_text_coords = (50, 603)
draw.text(penalty_total_text_coords, penalty_total_text, FONT_WHITE, penalty_total_text_font)
pp_total_text = "POWER PLAYS: 0"
pp_total_text_font = custom_font_size(font_opensans_bolditalic, 23)
pp_total_text_coords = (50, 423)
draw.text(pp_total_text_coords, pp_total_text, FONT_WHITE, pp_total_text_font)
# Setup & Draw Faceoff Graph (including inner / outer circles)
logging.debug("Generating Faceoff Stats for Image.")
text_title_faceoff = "FACEOFF %"
coords_faceoff_title = (950, 500)
coords_faceoff_pref = (950, 550)
coords_faceoff_other = (950, 575)
coords_faceoff_box = [780, 475, 920, 615]
coords_faceoff_box_inner_black = [810, 505, 890, 585]
coords_faceoff_box_inner_white = [809, 504, 891, 586]
pref_faceoff = float(preferred_stats["faceOffWinPercentage"])
text_faceoff_pref = f"{game.preferred_team.short_name}: {pref_faceoff}%".upper()
other_faceoff = float(other_stats["faceOffWinPercentage"])
text_faceoff_other = f"{game.other_team.short_name}: {other_faceoff}%".upper()
faceoff_angle = (pref_faceoff / 100) * 360
logging.debug("Preferred Faceoff: %s", pref_faceoff)
logging.debug("Faceoff Angle: %s", faceoff_angle)
text_title_faceoff = "FACEOFF %"
draw.text(coords_faceoff_title, text_title_faceoff, FONT_WHITE, font_opensans_boldit_small)
draw.text(coords_faceoff_pref, text_faceoff_pref, FONT_WHITE, font_opensans_regular_xxs)
draw.text(coords_faceoff_other, text_faceoff_other, FONT_WHITE, font_opensans_regular_xxs)
draw.pieslice(coords_faceoff_box, 0, faceoff_angle, fill=pref_colors_bg)
draw.pieslice(coords_faceoff_box, faceoff_angle, 360, fill=other_colors_bg)
# Draw outlines & inner circles
# draw.pieslice(coords_faceoff_box_inner_white, 0, 360, fill=(255, 255, 255))
draw.pieslice(coords_faceoff_box_inner_black, 0, 360, fill=(0, 0, 0))
# Draw Goals & Score Text
coords_pref_score = (1095, 198)
coords_pref_score_goals_box = [760, 210, 873, 246]
coords_pref_score_goals_text = (764, 215)
coords_goals_pref = (775, 256)
coords_other_score = (1095, 328)
coords_other_score_goals_box = [760, 336, 873, 372]
coords_other_score_goals_text = (764, 341)
coords_goals_other = (775, 378)
text_pref_score = game.preferred_team.score
text_other_score = game.other_team.score
text_pref_goal_title = f"{game.preferred_team.tri_code} GOALS".upper()
text_other_goal_title = f"{game.other_team.tri_code} GOALS".upper()
pref_goals_array = []
other_goals_array = []
logging.debug("Looping through preferred goals for stat box.")
preferred_boxscore_players = boxscore_preferred["players"]
for _, player in preferred_boxscore_players.items():
try:
if player["stats"]["skaterStats"]["goals"] == 1:
player_name = player["person"]["fullName"]
player_first_name = player_name.split()[0]
player_first_letter = player_first_name[0]
player_last_name = ' '.join(player_name.split()[1:])
player_abbrev_name = f"{player_first_letter}. {player_last_name}"
pref_goals_array.append(player_abbrev_name)
elif player["stats"]["skaterStats"]["goals"] > 1:
player_goals = player["stats"]["skaterStats"]["goals"]
player_name = player["person"]["fullName"]
player_first_name = player_name.split()[0]
player_first_letter = player_first_name[0]
player_last_name = ' '.join(player_name.split()[1:])
player_abbrev_name = f"{player_first_letter}. {player_last_name} [{player_goals}]"
pref_goals_array.append(player_abbrev_name)
except KeyError:
logging.debug("Stats for %s not available.", player["person"]["fullName"])
logging.debug("Looping through preferred goals for stat box.")
other_boxscore_players = boxscore_other["players"]
for _, player in other_boxscore_players.items():
try:
if player["stats"]["skaterStats"]["goals"] == 1:
player_name = player["person"]["fullName"]
player_first_name = player_name.split()[0]
player_first_letter = player_first_name[0]
player_last_name = ' '.join(player_name.split()[1:])
player_abbrev_name = f"{player_first_letter}. {player_last_name}"
other_goals_array.append(player_abbrev_name)
elif player["stats"]["skaterStats"]["goals"] > 1:
player_goals = player["stats"]["skaterStats"]["goals"]
player_name = player["person"]["fullName"]
player_first_name = player_name.split()[0]
player_first_letter = player_first_name[0]
player_last_name = ' '.join(player_name.split()[1:])
player_abbrev_name = f"{player_first_letter}. {player_last_name} [{player_goals}]"
other_goals_array.append(player_abbrev_name)
except KeyError:
logging.debug("Stats for %s not available.", player["person"]["fullName"])
logging.debug("Pref Goals: %s // Other Goals: %s", pref_goals_array, other_goals_array)
if len(pref_goals_array) < 4:
text_goals_pref = ", ".join(pref_goals_array)
logging.debug("Length: %s // String: %s", len(pref_goals_array), text_goals_pref)
else:
for idx, scorer in enumerate(pref_goals_array):
logging.debug("%s: %s", idx, scorer)
text_goals_pref = ", ".join(pref_goals_array[0:3])
text_goals_pref = text_goals_pref + "\n" + ", ".join(pref_goals_array[3:])
logging.debug("Length: %s // String: %s", len(pref_goals_array), text_goals_pref)
if len(other_goals_array) < 4:
text_goals_other = ", ".join(other_goals_array)
else:
text_goals_other = ", ".join(other_goals_array[0:3])
text_goals_other = text_goals_other + "\n" + ", ".join(other_goals_array[3:])
logging.debug("Drawing team score text.")
draw.text(coords_pref_score, str(text_pref_score), pref_colors_bg, font_opensans_bold_large)
draw.text(coords_other_score, str(text_other_score), other_colors_bg, font_opensans_bold_large)
logging.debug("Drawing team goal rects & title.")
draw.rectangle(coords_pref_score_goals_box, outline=None, fill=pref_colors_bg)
draw.rectangle(coords_other_score_goals_box, outline=None, fill=other_colors_bg)
draw.text(coords_pref_score_goals_text, text_pref_goal_title, FONT_WHITE, custom_font_size(font_opensans_bold, 18))
draw.text(coords_other_score_goals_text, text_other_goal_title, FONT_WHITE, custom_font_size(font_opensans_bold, 18))
logging.debug("Drawing goal scorer text.")
draw.multiline_text(coords_goals_pref, text_goals_pref, FONT_WHITE, custom_font_size(font_opensans_bold, 16))
draw.multiline_text(coords_goals_other, text_goals_other, FONT_WHITE, custom_font_size(font_opensans_bold, 16))
return bg
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# NHL Score & Parsing Methods
# ------------------------------------------------------------------------------
def get_team(nhl_team):
"""
Passes team name to NHL API and returns team ID.
:param nhl_team: Valid NHL team name.
"""
team_name = nhl_team.title()
url = "{}/api/v1/teams".format(NHLAPI_BASEURL)
logging.info("Sending API Request - %s", url)
team_json = req_session.get(url).json()
teams = team_json["teams"]
team_id = None
for team in teams:
if team["name"] == team_name:
team_id = team["id"]
if not team_id:
raise ValueError("{} is not a valid NHL team. Check your configuration file!"
.format(team_name))
return team_id
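# Usage sketch (assumes the statsapi /api/v1/teams response shape used above):
#   team_id = get_team("New Jersey Devils")   # returns the numeric NHL API id for that franchise
# A misspelled or non-NHL name raises ValueError instead of returning an id.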
def is_game_today(team_id):
"""Queries the NHL Schedule API to determine if there is a game today.
Args:
team_id (int) - The unique identifier of the team (from get_team function).
Returns:
(bool, games_info)
bool - True if game today, False if not.
games_info (dict) - A dictionary from the Schedule API that describes game information.
"""
now = datetime.now()
if args.yesterday:
now = now - timedelta(days=1)
elif args.date is not None:
try:
now = datetime.strptime(args.date, '%Y-%m-%d')
except ValueError as e:
logging.error("Invalid override date - exiting.")
logging.error(e)
sys.exit()
url = ("{}/api/v1/schedule?teamId={}&expand="
"schedule.broadcasts,schedule.teams&date={:%Y-%m-%d}"
.format(NHLAPI_BASEURL, team_id, now))
try:
logging.info("Sending API Request - %s", url)
schedule = req_session.get(url).json()
games_total = schedule["totalItems"]
except requests.exceptions.RequestException:
return False, None
if games_total == 1:
games_info = schedule["dates"][0]["games"][0]
return True, games_info
elif games_total == 2:
dirname = os.path.dirname(os.path.realpath(__file__))
if args.split is False:
logging.info("Split squad detected, spawning a second process to pick up second game.")
game_index = 0
if args.date is not None:
spawn_args = ' '.join(sys.argv[1:])
logging.debug("Spawning Process: python3 %s/hockey_twitter_bot.py --split %s", dirname, spawn_args)
Popen(['python3 ' + dirname + '/hockey_twitter_bot.py --split ' + spawn_args], shell=True)
else:
Popen(['nohup python3 ' + dirname + '/hockey_twitter_bot.py --split &'], shell=True)
else:
logging.info("Split squad detected, this is the second spawned process to pick up second game (sleep for 5 seconds).")
time.sleep(5)
game_index = 1
games_info = schedule["dates"][0]["games"][game_index]
return True, games_info
return False, None
def calculate_shot_distance(play):
"""Parses a play and returns the distance from the net.
Args:
play (dict): A dictionary of a penalty play attributes.
Note:
dist_string (String): distance with unit (foot / feet)
"""
event_x = abs(play['coordinates']['x'])
event_y = play['coordinates']['y']
approx_goal_x = 89
approx_goal_y = 0
shot_dist = math.ceil(math.hypot(event_x - approx_goal_x, event_y - approx_goal_y))
# shot_dist = abs(math.ceil(approx_goal_x - shot_x))
if shot_dist == 1:
shot_dist_unit = 'foot'
else:
shot_dist_unit = 'feet'
dist_string = f'{shot_dist} {shot_dist_unit}'
return dist_string
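# Worked example (illustrative, using the approximate net location of x=89, y=0 assumed above):
#   play = {"coordinates": {"x": -54, "y": 20}}
#   calculate_shot_distance(play)   # -> "41 feet", since ceil(hypot(abs(-54) - 89, 20)) == 41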
def get_lineup(game, period, on_ice, players):
"""Tweets out the starting lineup for the preferred team.
Args:
game (Game): The current game instance.
period (Period): The current period instance.
on_ice (list): A list of players on the ice for the preferred team.
players (dict): A dictionary of all players of the preferred team.
"""
logging.info("On Ice Players - {}".format(on_ice))
forwards = []
defense = []
goalies = []
for player in on_ice:
key_id = "ID{}".format(player)
player_obj = players[key_id]
logging.debug("Getting information for %s -- %s", key_id, player_obj)
# player_last_name = player_obj["person"]["lastName"]
player_last_name = player_obj["lastName"]
player_type = player_obj["primaryPosition"]["type"]
if player_type == "Forward":
forwards.append(player_last_name)
elif player_type == "Defenseman":
defense.append(player_last_name)
elif player_type == "Goalie":
goalies.append(player_last_name)
if period == 1:
tweet_forwards = "-".join(forwards)
tweet_defense = "-".join(defense)
tweet_goalie = goalies[0]
tweet_text = ("Tonight's {} starting lineup for your {} -\n\n{}\n{}\n{}"
.format(game.game_hashtag, game.preferred_team.team_name,
tweet_forwards, tweet_defense, tweet_goalie))
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
elif period == 4 and game.game_type in ("PR", "R"):
all_players = forwards + defense
tweet_players = "-".join(all_players)
try:
tweet_goalie = goalies[0]
tweet_text = ("On the ice to start overtime for your {} are:\n\n{} & {}\n\n{}"
.format(game.preferred_team.team_name, tweet_players,
tweet_goalie, game.game_hashtag))
except IndexError:
# If for some reason a goalie isn't detected on ice
tweet_text = ("On the ice to start overtime for your {} are:\n\n{}\n\n{}"
.format(game.preferred_team.team_name, tweet_players, game.game_hashtag))
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
elif period > 3 and game.game_type == "P":
ot_number = period - 3
tweet_forwards = "-".join(forwards)
tweet_defense = "-".join(defense)
tweet_goalie = goalies[0]
tweet_text = ("On the ice to start OT{} for your {} -\n\n{}\n{}\n{}"
.format(ot_number, game.preferred_team.team_name,
tweet_forwards, tweet_defense, tweet_goalie))
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
def goalie_pull_tweet(game, team):
"""Tweets a goalie pulled if the detected
Args:
game (Game): The current game instance.
team (str): A string equal to home or away to indicate team.
"""
goalie_pull_team = game.home_team.short_name if team == "home" else game.away_team.short_name
goalie_pull_text = ("The {} have pulled their goalie with {} left in the {} period. {}"
.format(goalie_pull_team, game.period.time_remaining,
game.period.current_ordinal, game.game_hashtag))
send_tweet(goalie_pull_text)
if args.discord:
send_discord(CHANNEL_ID, goalie_pull_text)
def parse_penalty(play, game):
"""Parses a JSON object of a penalty passed from loop_game_events.
Args:
play (dict): A dictionary of a penalty play attributes.
game (Game): The current game instance.
Note:
No return value, sends a tweet.
"""
penalty_team_name = play["team"]["name"]
if penalty_team_name == game.home_team.team_name:
penalty_on_team = game.home_team
penalty_draw_team = game.away_team
else:
penalty_on_team = game.away_team
penalty_draw_team = game.home_team
# Get current game & skater attributes
power_play_strength = game.power_play_strength
penalty_on_skaters = penalty_on_team.skaters
penalty_draw_skaters = penalty_draw_team.skaters
# Might be able to use these later to change wording
# penalty_on_team_name = penalty_on_team.short_name
# penalty_draw_team_name = penalty_draw_team.short_name
logging.info("PP Strength - %s | PenaltyOn Skaters - %s | PenaltyDraw Skaters - %s",
power_play_strength, penalty_on_skaters, penalty_draw_skaters)
preferred_shortname = game.preferred_team.short_name
# Determine skaters per side
if power_play_strength == "Even" and penalty_on_skaters == 4 and penalty_draw_skaters == 4:
# Teams are skating 4 on 4
penalty_text_skaters = "Teams will skate 4-on-4."
elif power_play_strength == "Even" and penalty_on_skaters == 3 and penalty_draw_skaters == 3:
# Teams are skating 3 on 3 in regulation
penalty_text_skaters = "Teams will skate 3-on-3."
elif power_play_strength != "Even":
if game.preferred_team.skaters == 5 and game.other_team.skaters == 4:
penalty_text_skaters = "{} are headed to the power play!".format(preferred_shortname)
elif game.preferred_team.skaters == 5 and game.other_team.skaters == 3:
penalty_text_skaters = "{} will have a two-man advantage!".format(preferred_shortname)
elif game.preferred_team.skaters == 4 and game.other_team.skaters == 5:
penalty_text_skaters = "{} are headed to the penalty kill.".format(preferred_shortname)
elif game.preferred_team.skaters == 4 and game.other_team.skaters == 3:
penalty_text_skaters = ("{} are headed to a 4-on-3 power play."
.format(preferred_shortname))
elif game.preferred_team.skaters == 3 and game.other_team.skaters == 5:
penalty_text_skaters = ("{} will have to kill off a two-man advantage."
.format(preferred_shortname))
elif game.preferred_team.skaters == 3 and game.other_team.skaters == 4:
penalty_text_skaters = ("{} will have a 4-on-3 PK to contend with."
.format(preferred_shortname))
else:
penalty_text_skaters = ""
for player in play["players"]:
if player["playerType"] == "PenaltyOn":
penalty_playeron = player["player"]["fullName"]
break
penalty_type = play["result"]["secondaryType"].lower()
penalty_severity = play["result"]["penaltySeverity"].lower()
penalty_minutes = play["result"]["penaltyMinutes"]
penalty_period_remain = play["about"]["periodTimeRemaining"]
penalty_period = play["about"]["ordinalNum"]
penalty_text_players = ("{} takes a {}-minute {} penalty for {} and "
"heads to the penalty box with {} left in the {} period."
.format(penalty_playeron, penalty_minutes, penalty_severity,
penalty_type, penalty_period_remain, penalty_period))
# Build power play / penalty kill stats
penalty_on_stats = penalty_on_team.get_stat_and_rank("penaltyKillPercentage")
penalty_draw_stats = penalty_draw_team.get_stat_and_rank("powerPlayPercentage")
penalty_on_team_name = penalty_on_team.short_name
penalty_on_stat = penalty_on_stats[0]
penalty_on_rank = penalty_on_stats[1]
penalty_on_rankstat_str = ("{} PK: {}% ({})"
.format(penalty_on_team_name, penalty_on_stat, penalty_on_rank))
penalty_draw_team_name = penalty_draw_team.short_name
penalty_draw_stat = penalty_draw_stats[0]
penalty_draw_rank = penalty_draw_stats[1]
penalty_draw_rankstat_str = ("{} PP: {}% ({})"
.format(penalty_draw_team_name, penalty_draw_stat, penalty_draw_rank))
if power_play_strength != "Even":
penalty_tweet = ("{} {}\n\n{}\n{}\n\n{}"
.format(penalty_text_players, penalty_text_skaters,
penalty_on_rankstat_str, penalty_draw_rankstat_str,
game.game_hashtag))
else:
penalty_tweet = ("{} {}\n\n{}"
.format(penalty_text_players, penalty_text_skaters, game.game_hashtag))
penalty_tweet_id = send_tweet(penalty_tweet)
if args.discord:
send_discord(CHANNEL_ID, penalty_tweet)
def parse_regular_goal(play, game):
"""Parses attributes of a goal and tweets out the result.
Args:
play (dict): A dictionary of a penalty play attributes.
game (Game): The current game instance.
"""
goal_eventidx = play["about"]["eventIdx"]
if game.assists_check == 0:
logging.info("Event #%s was a goal - initial parsing loop!", goal_eventidx)
else:
logging.info("Parsing event #%s for assists - check #%s.",
goal_eventidx, game.assists_check)
# Get players associated with scoring the goal (scorer & assists [in an array])
assists = []
for player in play["players"]:
if player["playerType"] == "Scorer":
goal_scorer_name = player["player"]["fullName"]
goal_scorer_total = player["seasonTotal"]
elif player["playerType"] == "Assist":
player_name = player["player"]["fullName"]
assist_total = player["seasonTotal"]
assists.append(f'{player_name} ({assist_total})')
elif player["playerType"] == "Goalie":
goalie_name = player["player"]["fullName"]
# Get other goal-related attributes
goal_team = play["team"]["name"]
goal_description = play["result"]["description"]
goal_type = play["result"]["secondaryType"].lower()
goal_strength = play["result"]["strength"]["name"]
goal_eng = play["result"]["emptyNet"]
goal_period = play["about"]["period"]
goal_period_type = play["about"]["periodType"]
goal_period_ord = play["about"]["ordinalNum"]
goal_period_remain = play["about"]["periodTimeRemaining"]
try:
goal_distance = calculate_shot_distance(play)
except Exception:  # shot coordinates can be missing from the play feed
goal_distance = None
goal_score_away = play["about"]["goals"]["away"]
goal_score_home = play["about"]["goals"]["home"]
# Make number of goal lights equal number of goals
if game.preferred_team.home_away == "home":
goal_score_preferred = goal_score_home
goal_score_other = goal_score_away
else:
goal_score_preferred = goal_score_away
goal_score_other = goal_score_home
# Regulation Goal Announcements
if goal_period_type == "REGULAR":
if goal_strength != "Even":
goal_announce = "{} {} GOAL!".format(goal_team, goal_strength)
elif goal_eng:
goal_announce = "{} Empty Net GOAL!".format(goal_team)
else:
if goal_score_preferred == 7:
goal_announce = "{} TOUCHDOWN!".format(goal_team)
else:
goal_announce = "{} GOAL!".format(goal_team)
# Overtime goal announcements should be more exciting
else:
goal_announce = "{} OVERTIME GOAL!!".format(goal_team)
# Change some wording around to make it a bit more unique
# TODO: Add some randomness to this section
if goal_type == "deflected":
goal_scorer_text = ("{} ({}) deflects a shot past {} with {} left in the {} period!"
.format(goal_scorer_name, ordinal(goal_scorer_total), goalie_name,
goal_period_remain, goal_period_ord))
else:
if goal_distance is not None:
goal_scorer_text = ("{} scores ({}) on a {} from {} away "
"with {} left in the {} period!"
.format(goal_scorer_name, ordinal(goal_scorer_total), goal_type,
goal_distance, goal_period_remain, goal_period_ord))
else:
goal_scorer_text = ("{} scores ({}) on a {} "
"with {} left in the {} period!"
.format(goal_scorer_name, ordinal(goal_scorer_total), goal_type,
goal_period_remain, goal_period_ord))
# In order to pickup assists we need to just wait a bit longer
# Increasing or decreasing assist_break will change that wait time
if not assists:
# If this is the first check with an unassisted goal, wait & check again
# Only check twice since we can quote-tweet assists
if game.assists_check < 2:
game.assists_check += 1
return False
else:
logging.info("No assists found - goal may be unassisted.")
# goal_assist_text = "The goal is unassisted!"
goal_assist_text = ""
game.assists_check = 0
# If the assists array is populated (with one or two players), go ahead and move on
elif len(assists) == 1:
goal_assist_text = "Give the assist to {}!".format(assists[0])
else:
goal_assist_text = ("The goal was assisted by {} & {}."
.format(assists[0], assists[1]))
# Goal scored by Preferred Team
if goal_team == game.preferred_team.team_name:
# Check previous goal to see if we can skip this goal
if len(game.preferred_team.goals) == goal_score_preferred:
logging.warning("A duplicate goal was detected, skip this eventIdx!")
return True
# Count number of goals per game
goals_per_game = 1
preferred_goals = game.preferred_team.goals
for idx, goal in enumerate(preferred_goals):
if goal_scorer_name == preferred_goals[idx].scorer:
goals_per_game += 1
# Format Goal Scorer Text to include multi-goal games
if goals_per_game == 2:
goal_scorer_text = ("With his {} goal of the game, {}"
.format(ordinal(goals_per_game), goal_scorer_text))
elif goals_per_game == 3:
goal_scorer_text = "🎩🎩🎩 HAT TRICK! {}".format(goal_scorer_text)
elif goals_per_game > 3:
goal_scorer_text = "{} GOALS!! {}".format(goals_per_game, goal_scorer_text)
num_lights = goal_score_home if game.preferred_team.home_away == "home" else goal_score_away
# goal_lights_text = '🚨' * num_lights
goal_lights_text = '\U0001F6A8' * num_lights
team_hashtag = nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type)
if not assists:
goal_text_player = "{}".format(goal_scorer_text)
else:
goal_text_player = "{} {}".format(goal_scorer_text, goal_assist_text)
goal_text_score = ("Score - {}: {} / {}: {}"
.format(game.preferred_team.short_name, goal_score_preferred,
game.other_team.short_name, goal_score_other))
goal_text_full = ("{} {}\n\n{}\n\n{}\n\n{} {}"
.format(goal_announce, goal_lights_text, goal_text_player,
goal_text_score, team_hashtag, game.game_hashtag))
goal_tweet = send_tweet(goal_text_full) if recent_event(play) else None
if args.discord:
send_discord(CHANNEL_ID, goal_text_full)
# Create Goal Object & append to Team goals array
goal = nhl_game_events.Goal(goal_description, goal_eventidx, goal_period, goal_period_type,
goal_period_ord, goal_period_remain, goal_score_home,
goal_score_away, goal_team, goal_type, goal_strength,
goal_eng, goal_scorer_name, assists, goal_tweet)
game.preferred_team.goals.append(goal)
# Goal was scored by Other Team
else:
num_thumbs = goal_score_home if game.other_team.home_away == "home" else goal_score_away
goal_thumbs_text = '\U0001F44E' * num_thumbs
goal_announce = "{} scored. {}".format(goal_team, goal_thumbs_text)
goal_text_player = ("{} ({}) - {} left in the {} period."
.format(goal_scorer_name, ordinal(goal_scorer_total),
goal_period_remain, goal_period_ord))
goal_text_score = ("Score - {}: {} / {}: {}"
.format(game.preferred_team.short_name, goal_score_preferred,
game.other_team.short_name, goal_score_other))
goal_other_tweet = ("{}\n\n{}\n\n{}\n\n{}"
.format(goal_announce, goal_text_player,
goal_text_score, game.game_hashtag))
if recent_event(play):
goal_tweet = send_tweet(goal_other_tweet)
if args.discord:
send_discord(CHANNEL_ID, goal_other_tweet)
return True
def parse_shootout_event(play, game):
"""Parses attributes of a shootout event and tweets out the result.
Args:
play (dict): A dictionary of a penalty play attributes.
game (Game): The current game instance.
period (Period): The current period instance.
"""
for player in play["players"]:
if player["playerType"] == "Goalie":
goalie_name = player["player"]["fullName"]
# The non-goalie player is the shooter (covers both SHOT and GOAL events)
else:
shooter_name = player["player"]["fullName"]
shootout_team = play["team"]["name"]
shootout_event = play["result"]["eventTypeId"]
shootout_emoji = "\U00002705" if shootout_event == "GOAL" else "\U0000274C"
logging.info("Shootout event (%s - %s) detected for %s.",
shootout_event, shootout_emoji, shootout_team)
# Preferred Team is shooting
if shootout_team == game.preferred_team.team_name:
game.shootout.preferred_score.append(shootout_emoji)
if shootout_event == "GOAL":
shootout_event_text = "{} shoots & scores! \U0001F6A8".format(shooter_name)
elif shootout_event == "SHOT":
shootout_event_text = ("{}'s shot saved by {}. \U0001F620"
.format(shooter_name, goalie_name))
else:
shootout_event_text = "{} shoots & misses. \U0001F620".format(shooter_name)
# Other Team is shooting
else:
game.shootout.other_score.append(shootout_emoji)
if shootout_event == "GOAL":
shootout_event_text = "{} shoots & scores. \U0001F620".format(shooter_name)
elif shootout_event == "SHOT":
shootout_event_text = ("{}'s shot saved by {}! \U0001F645\U0000200D\U00002642\U0000FE0F"
.format(shooter_name, goalie_name))
else:
shootout_event_text = ("{} shoots & misses. \U0001F645\U0000200D\U00002642\U0000FE0F"
.format(shooter_name))
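    # Build the running shootout result lines (e.g. '✅ - ❌ - ✅') for each team.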
shootout_preferred_score = " - ".join(game.shootout.preferred_score)
shootout_other_score = " - ".join(game.shootout.other_score)
shootout_score_text = ("{}: {}\n{}: {}"
.format(game.preferred_team.short_name, shootout_preferred_score,
game.other_team.short_name, shootout_other_score))
shootout_tweet_text = ("{}\n\n{}\n\n{}"
.format(shootout_event_text, shootout_score_text, game.game_hashtag))
send_tweet(shootout_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, shootout_tweet_text)
# Increment Shootout Shots
game.shootout.shots = game.shootout.shots + 1
def parse_missed_shot(play, game):
"""Parses attributes of a missed shot (post / crossbar) and tweets out the result.
Args:
        play (dict): A dictionary of the missed shot play's attributes.
game (Game): The current game instance.
"""
shot_team = play["team"]["name"]
if shot_team != game.preferred_team.team_name:
return False
shot_description = play["result"]["description"].lower()
if "crossbar" in shot_description:
shot_hit = "crossbar"
elif "goalpost" in shot_description:
shot_hit = "post"
else:
logging.info("The preferred team missed a shot, but didn't hit the post.")
return False
logging.info("The preferred team hit a post or crossbar - find distance & tweet it.")
shooter = play['players'][0]['player']['fullName']
shot_period_ord = play["about"]["ordinalNum"]
shot_period_remain = play["about"]["periodTimeRemaining"]
shot_x = abs(play['coordinates']['x'])
shot_y = play['coordinates']['y']
approx_goal_x = 89
approx_goal_y = 0
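    # Straight-line distance (in feet) from the shot location to the approximate
    # center of the goal line (x=89, y=0 in NHL rink coordinates), rounded up.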
shot_dist = math.ceil(math.hypot(shot_x - approx_goal_x, shot_y - approx_goal_y))
# shot_dist = abs(math.ceil(approx_goal_x - shot_x))
if shot_dist == 1:
shot_dist_unit = 'foot'
else:
shot_dist_unit = 'feet'
game_hashtag = game.game_hashtag
preferred_hashtag = nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type)
shot_tweet_text = (f'DING! 🛎\n\n{shooter} hits the {shot_hit} from {shot_dist} {shot_dist_unit} '
f'away with {shot_period_remain} remaining in the {shot_period_ord} period.'
f'\n\n{preferred_hashtag} {game_hashtag}')
send_tweet(shot_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, shot_tweet_text)
def check_tvtimeout(play, game):
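    """Waits out a recent stoppage, then checks the HTML play-by-play report
    to determine whether it was a TV timeout and tweets it if so.
    Args:
        play (dict): A dictionary of the stoppage play's attributes.
        game (Game): The current game instance.
    """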
logging.info("Recent stoppage detected - wait 10 seconds & check if this is a TV Timeout.")
time.sleep(10)
# Check if a Stoppage is a TV Timeout
html_plays = get_html_report(game)
last_play = html_plays[-1]
last_play_details = last_play.find_all('td', class_='bborder')
event_description = last_play_details[5].text
logging.info('Last HTML Event Description - %s', event_description)
if "tv timeout" in event_description.lower():
period_ordinal = play["about"]["ordinalNum"]
period_remaining = play["about"]["periodTimeRemaining"]
game_hashtag = game.game_hashtag
tv_timeout_tweet = (f'Heading to a TV Timeout with {period_remaining} '
f'remaining in the {period_ordinal} period.'
f'\n\n{game_hashtag}')
send_tweet(tv_timeout_tweet)
if args.discord:
send_discord(CHANNEL_ID, tv_timeout_tweet)
def check_scoring_changes(previous_goals, game):
"""
    Loops through previously scored goals & determines if a scoring change has occurred.
    Args:
        previous_goals (list): A list of previously parsed goal play dictionaries.
game (Game): The current game instance.
"""
preferred_goals = game.preferred_team.goals
for idx, previous_goal in enumerate(previous_goals):
assists = []
for player in previous_goal["players"]:
if player["playerType"] == "Scorer":
goal_scorer_name = player["player"]["fullName"]
elif player["playerType"] == "Assist":
player_name = player["player"]["fullName"]
assist_total = player["seasonTotal"]
assists.append(f'{player_name} ({assist_total})')
# Check for changes in existing goal array
if goal_scorer_name != preferred_goals[idx].scorer:
logging.info("Goal scorer change detected for goal #%s.", idx)
# goal_tweet = preferred_goals[idx].tweet
goal_tweet = "{}{}".format(TWITTER_URL, preferred_goals[idx].tweet)
goal_scorechange_announce = "Scoring change on the below goal."
if not assists:
goal_scorechange_text = ("Now reads as an unassisted goal for {}."
.format(goal_scorer_name))
elif len(assists) == 1:
goal_scorechange_text = ("Now reads as {} from {}."
.format(goal_scorer_name, assists[0]))
else:
goal_scorechange_text = ("Now reads as {} from {} and {}."
.format(goal_scorer_name, assists[0], assists[1]))
# Use this to quote tweet
goal_scorechange_tweet = ("{} {} {}\n{}"
.format(goal_scorechange_announce, goal_scorechange_text,
game.game_hashtag, goal_tweet))
goal_scorechange_tweeturl = send_tweet(goal_scorechange_tweet)
if args.discord:
send_discord(CHANNEL_ID, goal_scorechange_tweet)
# Adjust the values of the array with the changed ones
preferred_goals[idx].scorer = goal_scorer_name
preferred_goals[idx].assists = assists
preferred_goals[idx].tweet = goal_scorechange_tweeturl
# This is used when ONLY the assists change.
elif assists != preferred_goals[idx].assists:
logging.info("Assists added or changed for goal #%s.", idx)
logging.info("New assists - %s", assists)
goal_tweet = "{}{}".format(TWITTER_URL, preferred_goals[idx].tweet)
# Original goal has no assists, just tweet that indication.
if not preferred_goals[idx].assists:
if len(assists) == 1:
goal_assistchange_text = ("Give the lone assist on {}'s goal to {}."
.format(preferred_goals[idx].scorer, assists[0]))
elif len(assists) == 2:
goal_assistchange_text = ("The goal is now assisted by {} and {}."
.format(assists[0], assists[1]))
else:
goal_assistchange_text = "The goal is now unassisted."
# Use this to quote tweet
goal_assistchange_tweet = ("{} {}\n{}"
.format(goal_assistchange_text,
game.game_hashtag, goal_tweet))
goal_assistchange_url = send_tweet(goal_assistchange_tweet)
if args.discord:
send_discord(CHANNEL_ID, goal_assistchange_tweet)
# Assists on the original goal have changed, quote tweet that with different wording.
else:
goal_assistchange_announce = "The assists on the below goal have changed."
if len(assists) == 1:
goal_assistchange_text = ("Give the lone assist on {}'s goal to {}."
.format(preferred_goals[idx].scorer, assists[0]))
elif len(assists) == 2:
goal_assistchange_text = ("The goal is now assisted by {} and {}."
.format(assists[0], assists[1]))
else:
goal_assistchange_text = "The goal is now unassisted."
# Use this to quote tweet
goal_assistchange_tweet = ("{} {} {}\n{}"
.format(goal_assistchange_announce,
goal_assistchange_text, game.game_hashtag,
goal_tweet))
goal_assistchange_url = send_tweet(goal_assistchange_tweet)
if args.discord:
send_discord(CHANNEL_ID, goal_assistchange_tweet)
# Then, adjust the values of the array with the changed ones
preferred_goals[idx].scorer = goal_scorer_name
preferred_goals[idx].assists = assists
preferred_goals[idx].tweet = goal_assistchange_url
else:
logging.info("No scoring change detected for goal #%s.", idx)
def get_game_events(game):
"""
Queries the NHL Live Feed API endpoint and returns a JSON object.
Input:
game - current game as a Game object
Output:
live_feed_json - JSON object of live feed results
"""
try:
live_feed_json = req_session.get(game.live_feed).json()
except requests.exceptions.RequestException:
logging.error("Game Events request (%s) timed out!", game.live_feed)
return None
if args.localdata:
live_feed_json = json.load(open('localdata/sample-data.json'))
# Update all object attributes (game, period & teams)
update_object_attributes(live_feed_json, game)
# Return a JSON object of all game events
return live_feed_json
def loop_game_events(json_feed, game):
"""
Takes a JSON object of game events & parses for events.
Input:
json - JSON object of live feed results (usually from get_game_events)
game - current game as a Game object
    Output:
None
"""
# Logic for preferred / other team objects via if statement
# (preferred_team, other_team) = nhl_game_events.preferred_teams(home_team, away_team)
all_plays = json_feed["liveData"]["plays"]["allPlays"]
linescore = json_feed["liveData"]["linescore"]
    shootout = linescore["currentPeriodOrdinal"] == "SO"
logging.info("Game Event Shootout Loop - %s", shootout)
# Subset all_plays dictionary by last_event_idx to shorten loop
next_event = game.last_event_idx + 1
new_plays = all_plays[next_event:]
# If there are no new plays, use this loop to check for scoring changes
if not new_plays:
previous_goals = []
preferred_team_name = game.preferred_team.team_name
previous_plays = all_plays[:game.last_event_idx]
for previous_play in previous_plays:
prev_event_type = previous_play["result"]["eventTypeId"]
prev_event_period = previous_play["about"]["ordinalNum"]
if prev_event_type == "GOAL" and prev_event_period != "SO":
prev_event_team = previous_play["team"]["name"]
if prev_event_team == preferred_team_name:
previous_goals.append(previous_play)
# This can happen if the Game Bot misses a goal - parse the last goal in all_plays
if len(previous_goals) > len(game.preferred_team.goals):
logging.info("Goal discrepancy detected - parsing previous goal.")
last_goal = previous_goals[-1]
parse_regular_goal(last_goal, game)
return
# Send array into scoring changes function
logging.info("No new events detected - going to check for scoring changes.")
check_scoring_changes(previous_goals, game)
return
# For completeness, print event ID & type in our detection line
if len(new_plays) < 10:
new_plays_shortlist = list()
for play in new_plays:
event_type = play["result"]["eventTypeId"]
event_idx = play["about"]["eventIdx"]
short_list_play = "{}: {}".format(event_idx, event_type)
new_plays_shortlist.append(short_list_play)
logging.info("%s new event(s) detected - looping through them now. %s", \
len(new_plays), new_plays_shortlist)
else:
logging.info("%s new event(s) detected - looping through them now.", len(new_plays))
# Loop through any new plays to check for events
for play in new_plays:
event_type = play["result"]["eventTypeId"]
event_idx = play["about"]["eventIdx"]
event_description = play["result"]["description"]
event_period = play["about"]["period"]
event_period_ordinal = play["about"]["ordinalNum"]
period_type = play["about"]["periodType"]
        # Parse each play by checking its event_type and parsing information if needed
# if event_type == "PERIOD_READY" and event_period == 1:
if (event_type == "PERIOD_READY" and game.period.current == 1):
preferred_team = game.preferred_team
preferred_homeaway = preferred_team.home_away
on_ice = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["onIce"]
# players = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["players"]
players = json_feed["gameData"]["players"]
if recent_event(play):
get_lineup(game, event_period, on_ice, players)
elif event_type == "PERIOD_READY" and event_period == 4 and game.game_type in ("PR", "R"):
preferred_team = game.preferred_team
preferred_homeaway = preferred_team.home_away
on_ice = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["onIce"]
# players = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["players"]
players = json_feed["gameData"]["players"]
if recent_event(play):
get_lineup(game, event_period, on_ice, players)
elif event_type == "PERIOD_READY" and event_period > 3 and game.game_type == "P":
logging.info("Playoff overtime detected.")
preferred_team = game.preferred_team
preferred_homeaway = preferred_team.home_away
on_ice = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["onIce"]
# players = json_feed["liveData"]["boxscore"]["teams"][preferred_homeaway]["players"]
players = json_feed["gameData"]["players"]
if recent_event(play):
get_lineup(game, event_period, on_ice, players)
# elif event_type == "PERIOD_READY" and event_period == 5 and game.game_type in ("PR", "R"):
# tweet_text = ("The shootout is ready to begin at {}!\n\n{}"
# .format(game.venue, game.game_hashtag))
elif event_type == "PERIOD_START":
if event_period == 1:
tweet_text = ("The puck has dropped between the {} & {} at {}!\n\n{}"
.format(game.preferred_team.short_name, game.other_team.short_name,
game.venue, game.game_hashtag))
elif event_period in (2, 3):
tweet_text = ("It's time for the {} period at {}.\n\n{}"
.format(event_period_ordinal, game.venue,
game.game_hashtag))
elif event_period == 4 and game.game_type in ("PR", "R"):
tweet_text = ("Who will be the hero this time? 3-on-3 OT starts now at {}!\n\n{}"
.format(game.venue, game.game_hashtag))
elif event_period > 3 and game.game_type == "P":
ot_period = event_period - 3
tweet_text = ("Who will be the hero this time? OT{} starts now at {}!\n\n{}"
.format(ot_period, game.venue, game.game_hashtag))
elif event_period == 5 and game.game_type in ("PR", "R"):
tweet_text = ("The shootout is underway at {}!\n\n{}"
.format(game.venue, game.game_hashtag))
if recent_event(play):
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
elif event_type == "PERIOD_END":
if event_period in (1, 2):
# Calculate win percentage when winning / trailing after period
pref_score = game.preferred_team.score
other_score = game.other_team.score
if pref_score > other_score:
if event_period == 1:
lead_trail_stat = game.preferred_team.lead_trail_lead1P
elif event_period == 2:
lead_trail_stat = game.preferred_team.lead_trail_lead2P
lead_trail_text = ("When leading after the {} period, the {} are {}."
.format(event_period_ordinal, game.preferred_team.short_name,
lead_trail_stat))
elif pref_score < other_score:
if event_period == 1:
lead_trail_stat = game.preferred_team.lead_trail_trail1P
elif event_period == 2:
lead_trail_stat = game.preferred_team.lead_trail_trail2P
lead_trail_text = ("When trailing after the {} period, the {} are {}."
.format(event_period_ordinal, game.preferred_team.short_name,
lead_trail_stat))
else:
lead_trail_text = None
# Build end of period tweet & image
boxscore = json_feed["liveData"]["boxscore"]["teams"]
boxscore_away = boxscore["away"]
boxscore_home = boxscore["home"]
boxscore_preferred = boxscore_home if game.home_team.preferred else boxscore_away
boxscore_other = boxscore_away if game.home_team.preferred else boxscore_home
img = stats_image_generator(game, "intermission", boxscore_preferred, boxscore_other)
img_shotmap = hockey_bot_imaging.image_generator_shotmap(game, all_plays)
shotmap_tweet_text = f'A look at the "all situations" shotmap after the {event_period_ordinal} period.\n\n{game.game_hashtag}'
if args.notweets:
img.show()
img_shotmap.show()
else:
img_filename = (os.path.join(PROJECT_ROOT, 'resources/images/GamedayIntermission-{}-{}.png'
.format(event_period, game.preferred_team.games + 1)))
img.save(img_filename)
img_shotmap_filename = (os.path.join(PROJECT_ROOT, 'resources/images/RinkShotmap-{}-{}.png'
.format(event_period, game.preferred_team.games + 1)))
img_shotmap.save(img_shotmap_filename)
if lead_trail_text is None:
# tweet_text = ("The {} period of {} comes to an end.\n\n"
# "{}: {} ({} shots)\n{}: {} ({} shots)"
# .format(event_period_ordinal, game.game_hashtag,
# game.preferred_team.short_name, game.preferred_team.score,
# game.preferred_team.shots, game.other_team.short_name,
# game.other_team.score, game.other_team.shots))
tweet_text = ("The {} period of {} comes to an end."
.format(event_period_ordinal, game.game_hashtag))
else:
tweet_text = ("The {} period of {} comes to an end. {}"
.format(event_period_ordinal, game.game_hashtag, lead_trail_text))
if recent_event(play):
api = get_api()
api.update_with_media(img_filename, tweet_text)
api.update_with_media(img_shotmap_filename, shotmap_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text, img_filename)
send_discord(CHANNEL_ID, shotmap_tweet_text, img_shotmap_filename)
# 1st and 2nd intermission is 18 minutes - sleep for that long
linescore = json_feed["liveData"]["linescore"]
intermission_remain = linescore["intermissionInfo"]["intermissionTimeRemaining"]
logging.info("Intermission Remaining: %s -- " \
"sleep for 60 seconds less.", intermission_remain)
intermission_sleep = intermission_remain - 60
if intermission_sleep > 60:
time.sleep(intermission_sleep)
elif event_period == 3 and (game.preferred_team.score == game.other_team.score):
tweet_text = ("60 minutes wasn't enough to decide this game. "
"{} and {} headed to overtime tied at {}.\n\n{}"
.format(game.preferred_team.short_name, game.other_team.short_name,
game.preferred_team.score, game.game_hashtag))
if recent_event(play):
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
elif event_period > 3 and (game.preferred_team.score == game.other_team.score) and game.game_type == "P":
ot_period = event_period - 3
next_ot = ot_period + 1
ot_string = "overtime wasn't" if ot_period == 1 else "overtimes weren't"
                tweet_text = ("{} {} enough to decide this game. "
"{} and {} headed to OT{} tied at {}.\n\n{}"
.format(ot_period, ot_string, game.preferred_team.short_name, game.other_team.short_name,
next_ot, game.preferred_team.score, game.game_hashtag))
if recent_event(play):
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
elif event_type == "PENALTY":
if recent_event(play):
parse_penalty(play, game)
elif event_type == "GOAL" and period_type != "SHOOTOUT":
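            # Re-fetch and re-parse this goal event until parse_regular_goal reports
            # that the scorer / assist information is complete (it returns True when done).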
assists_check_done = parse_regular_goal(play, game)
while not assists_check_done:
# Get events from API & parse single event again
game_events_recheck = get_game_events(game)
assist_event = game_events_recheck["liveData"]["plays"]["allPlays"][event_idx]
assists_check_done = parse_regular_goal(assist_event, game)
# Sleep for 4 seconds (enough time to get assists)
time.sleep(4)
game.assists_check = 0
elif event_type in ("GOAL", "SHOT", "MISSED_SHOT") and period_type == "SHOOTOUT":
parse_shootout_event(play, game)
elif event_type == "MISSED_SHOT" and period_type != "SHOOTOUT":
if recent_event(play):
parse_missed_shot(play, game)
# This code is not reliable enough - commenting out for now
# elif event_type == "STOP":
# if recent_event(play):
# check_tvtimeout(play, game)
else:
logging.debug("Other event: %s - %s", event_type, event_description)
# For each loop iteration, update the eventIdx in the game object
game.last_event_idx = event_idx
def get_html_report(game):
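    """Fetches the NHL HTML play-by-play report for the current game.
    Args:
        game (Game): The current game instance.
    Returns:
        all_plays (list): All event rows (tr.evenColor) from the report.
    """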
game_id = game.game_id_html
pbp_url = f'http://www.nhl.com/scores/htmlreports/20182019/PL{game_id}.HTM'
logging.info('Going to get HTML Report - %s', pbp_url)
pbp = requests.get(pbp_url)
pbp_soup = BeautifulSoup(pbp.content, 'lxml')
all_plays = pbp_soup.find_all('tr', class_='evenColor')
return all_plays
def parse_end_of_game(json_feed, game):
"""
    Takes the final JSON object of game events & sends the end-of-game tweets
    (final score, three stars, shotmap & advanced stats).
Input:
json - JSON object of live feed results (usually from get_game_events)
game - current game as a Game object
    Output:
        all_tweets_sent - True once all end-of-game tweets have been sent
"""
all_plays = json_feed["liveData"]["plays"]["allPlays"]
all_players = json_feed["gameData"]["players"]
boxscore = json_feed["liveData"]["boxscore"]["teams"]
decisions = json_feed["liveData"]["decisions"]
# Once available, build the final score tweet & send it.
boxscore_away = boxscore["away"]
boxscore_home = boxscore["home"]
boxscore_preferred = boxscore_home if game.home_team.preferred else boxscore_away
boxscore_other = boxscore_away if game.home_team.preferred else boxscore_home
preferred_home_text = "on the road" if game.preferred_team.home_away == "away" else "at home"
preferred_hashtag = nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type)
    preferred_final_score = game.preferred_team.score
    other_final_score = game.other_team.score
    if preferred_final_score > other_final_score:
final_score_text = ("{} win {} over the {} by a score of {} to {}! 🚨🚨🚨"
.format(game.preferred_team.short_name, preferred_home_text,
game.other_team.short_name, game.preferred_team.score,
game.other_team.score))
else:
final_score_text = ("{} lose {} to the {} by a score of {} to {}. \U0001F44E"
.format(game.preferred_team.short_name, preferred_home_text,
game.other_team.short_name, game.preferred_team.score,
game.other_team.score))
# Get next game on the schedule (bottom of the final tweet)
try:
pref_team_id = game.preferred_team.team_id
next_game_url = f'{NHLAPI_BASEURL}/api/v1/teams/{pref_team_id}?expand=team.schedule.next'
logging.info(f"Going to get next game information via URL - {next_game_url}")
next_game_json = req_session.get(next_game_url).json()
next_game_sched = next_game_json.get('teams')[0].get('nextGameSchedule')
next_game = next_game_sched.get('dates')[0].get('games')[0]
# Commands used to calculate time related attributes
localtz = dateutil.tz.tzlocal()
localoffset = localtz.utcoffset(datetime.now(localtz))
next_game_date = next_game.get('gameDate')
next_game_datetime = datetime.strptime(next_game_date, '%Y-%m-%dT%H:%M:%SZ')
next_game_datetime_local = next_game_datetime + localoffset
next_game_date_string = datetime.strftime(next_game_datetime_local, '%A %B %d @ %I:%M%p')
# Get the Opponent for Next Game
next_game_teams = next_game.get('teams')
next_game_home = next_game_teams.get('home')
next_game_away = next_game_teams.get('away')
if next_game_home.get('team').get('id') == pref_team_id:
next_game_opponent = next_game_away.get('team').get('name')
else:
next_game_opponent = next_game_home.get('team').get('name')
next_game_venue = next_game.get('venue').get('name')
next_game_text = (f'Next Game: {next_game_date_string} vs. {next_game_opponent}'
f' (at {next_game_venue})!')
    except Exception:
logging.warning('NHL API returned an incorrect response.')
next_game_text = ''
# Generate Final Image
# img = final_image(game, boxscore_preferred, boxscore_other)
img = stats_image_generator(game, "final", boxscore_preferred, boxscore_other)
final_score_tweet = ("{} {} {}\n\n{}"
.format(final_score_text, preferred_hashtag, game.game_hashtag,
next_game_text))
if game.finaltweets["finalscore"] is False:
# Set the Image Filename & Save it
img_filename = (os.path.join(PROJECT_ROOT, 'resources/images/GamedayFinal-{}.png'
.format(game.preferred_team.games + 1)))
img.save(img_filename)
if args.notweets:
img.show()
logging.info("%s", final_score_tweet)
else:
api = get_api()
api.update_with_media(img_filename, final_score_tweet)
if args.discord:
logging.info("Sending Image & Message to Discord: %s", final_score_tweet)
send_discord(CHANNEL_ID, final_score_tweet, img_filename)
game.finaltweets["finalscore"] = True
# Once available, build the 3-stars tweet & send it.
try:
logging.info("Checking for the 3-stars of the game.")
first_star_id = "ID{}".format(decisions["firstStar"]["id"])
second_star_id = "ID{}".format(decisions["secondStar"]["id"])
third_star_id = "ID{}".format(decisions["thirdStar"]["id"])
first_star_name = decisions["firstStar"]["fullName"]
second_star_name = decisions["secondStar"]["fullName"]
third_star_name = decisions["thirdStar"]["fullName"]
first_star_tricode = all_players[first_star_id]["currentTeam"]["triCode"]
second_star_tricode = all_players[second_star_id]["currentTeam"]["triCode"]
third_star_tricode = all_players[third_star_id]["currentTeam"]["triCode"]
first_star_full = "{} ({})".format(first_star_name, first_star_tricode)
second_star_full = "{} ({})".format(second_star_name, second_star_tricode)
third_star_full = "{} ({})".format(third_star_name, third_star_tricode)
stars_text = ("⭐️: {}\n⭐️⭐️: {}\n⭐️⭐️⭐️: {}"
.format(first_star_full, second_star_full, third_star_full))
stars_tweet = ("The three stars for the game are - \n{}\n\n{}"
.format(stars_text, game.game_hashtag))
if game.finaltweets["stars"] is False:
if args.notweets:
logging.info("%s", stars_tweet)
else:
send_tweet(stars_tweet)
if args.discord:
send_discord(CHANNEL_ID, stars_tweet)
game.finaltweets["stars"] = True
except KeyError:
logging.info("3-stars have not yet posted - try again in next iteration.")
# Generate Shotmap & Send Tweet
if game.finaltweets["shotmap"] is False:
img_shotmap = hockey_bot_imaging.image_generator_shotmap(game, all_plays)
shotmap_tweet_text = f'Final shot map of the game.'
img_shotmap_filename = (os.path.join(PROJECT_ROOT, 'resources/images/RinkShotmap-Final-{}.png'
.format(game.preferred_team.games + 1)))
img_shotmap.save(img_shotmap_filename)
if args.notweets:
img_shotmap.show()
logging.info("%s", shotmap_tweet_text)
else:
api = get_api()
api.update_with_media(img_shotmap_filename, shotmap_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, shotmap_tweet_text, img_shotmap_filename)
game.finaltweets["shotmap"] = True
# Perform Opposition Stats
if game.finaltweets["opposition"] is False:
try:
nss_opposition, nss_opposition_byline = advanced_stats.nss_opposition(game, game.preferred_team)
# If both return values are False, it means the lines aren't confirmed
if nss_opposition is False and nss_opposition_byline is False:
                tweet_text = (f'The bot could not programmatically find the confirmed lines for tonight. '
                              f'Due to this, no advanced stats will be posted.'
f'\n\n{preferred_hashtag} {game.game_hashtag}')
send_tweet(tweet_text)
if args.discord:
send_discord(CHANNEL_ID, tweet_text)
# Skip the remainder of the functions by setting retries & tweet array values
# Then raise an Exception to skip the rest of the below
game.finaltweets["advstats"] = True
game.finaltweets["opposition"] = True
                game.finaltweets_retry = 3
raise ValueError('Advanced stats cannot be performed with no lines!')
# If the above criteria is not met, the bot can do the rest of the advanced stats
opposition_tweet_text = (f'{game.preferred_team.team_name} Primary Opposition\n'
f'(via @NatStatTrick)')
img = hockey_bot_imaging.image_generator_nss_opposition(nss_opposition_byline)
img_filename = os.path.join(PROJECT_ROOT,
'resources/images/GamedayAdvStats-{}.png'
.format(game.preferred_team.games + 1))
img.save(img_filename)
if args.notweets:
img.show()
logging.info("%s", opposition_tweet_text)
else:
api = get_api()
api.update_with_media(img_filename, opposition_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, opposition_tweet_text, img_filename)
game.finaltweets["opposition"] = True
except Exception as e:
logging.error(e)
if game.finaltweets_retry == 3:
logging.warning('Maximum of 3 retries exceeded - setting opposition to True.')
game.finaltweets["opposition"] = True
# Perform Line-By-Line Advanced Stats
if game.finaltweets["advstats"] is False:
try:
# Run the Advanced Stats Function
nss_linetool, nss_linetool_dict = advanced_stats.nss_linetool(game, game.preferred_team)
# nss_linetool = advanced_stats.nss_linetool(game, game.preferred_team)
if nss_linetool is False or nss_linetool_dict is False:
raise IndexError('Line tool not yet available for this game - try again shortly.')
adv_stats_tweet_text = (f'{game.preferred_team.team_name} Advanced Stats\n'
f'(via @NatStatTrick)')
img = hockey_bot_imaging.image_generator_nss_linetool(nss_linetool_dict)
img_filename = os.path.join(PROJECT_ROOT,
'resources/images/GamedayAdvStats-{}.png'
.format(game.preferred_team.games + 1))
img.save(img_filename)
if args.notweets:
img.show()
logging.info("%s", adv_stats_tweet_text)
else:
api = get_api()
api.update_with_media(img_filename, adv_stats_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, adv_stats_tweet_text, img_filename)
game.finaltweets["advstats"] = True
except Exception as e:
logging.error(e)
if game.finaltweets_retry == 3:
                logging.warning('Maximum of 3 retries exceeded - setting advstats to True.')
game.finaltweets["advstats"] = True
all_tweets_sent = all(value is True for value in game.finaltweets.values())
logging.info("All Tweets Info: %s", game.finaltweets)
# Increment Final Tweets Retry Counter
game.finaltweets_retry += 1
return all_tweets_sent
def game_preview(game):
"""
Runs when the game is in preview state and it is not yet game time.
Input:
game - current game as a Game object
Output:
None
"""
logging.info("Game Date (UTC) - %s", game.date_time)
logging.info("Game Date (LCL) - %s", game.game_time_local)
# Get preferred & other team from Game object
(preferred_team, other_team) = game.get_preferred_team()
pref_team_homeaway = game.preferred_team.home_away
# Format & send preview tweet
clock_emoji = nhl_game_events.clock_emoji(game.game_time_local)
if game.game_type == "P":
preview_text_teams = (
"Tune in {} for Game #{} when the {} take on the {} at {}."
            .format(game.game_time_of_day, game.game_id_playoff_game, preferred_team.team_name, other_team.team_name, game.venue)
)
else:
preview_text_teams = (
"Tune in {} when the {} take on the {} at {}."
.format(game.game_time_of_day, preferred_team.team_name, other_team.team_name, game.venue)
)
preview_text_emojis = (
"{}: {}\n\U0001F4FA: {}\n\U00000023\U0000FE0F\U000020E3: {}"
.format(clock_emoji, game.game_time_local, preferred_team.tv_channel, game.game_hashtag)
)
preview_tweet_text = "{}\n\n{}".format(preview_text_teams, preview_text_emojis)
# logging.info("[TWEET] \n%s", preview_tweet_text)
# Sleep script until game time.
# Get Team Hashtags
pref_hashtag = nhl_game_events.team_hashtag(game.preferred_team.team_name, game.game_type)
other_hashtag = nhl_game_events.team_hashtag(game.other_team.team_name, game.game_type)
# Get Season Series
season_series_strings = nhl_game_events.season_series(game.game_id, game.preferred_team,
game.other_team)
season_series_str = season_series_strings[0]
if season_series_str is None:
season_series_tweet = ("This is the first meeting of the season between "
"the {} & the {}.\n\n{} {} {}"
.format(game.preferred_team.short_name, game.other_team.short_name,
pref_hashtag, other_hashtag, game.game_hashtag))
else:
points_leader_str = season_series_strings[1]
toi_leader_str = season_series_strings[2]
if game.game_type == "P":
# season_series_str = season_series_str.replace("season series", "regular season series")
season_series_str = "Regular Season Stats -\n\n{}".format(season_series_str)
season_series_tweet = ("{}\n{}\n{}\n\n{} {} {}"
.format(season_series_str, points_leader_str, toi_leader_str,
pref_hashtag, other_hashtag, game.game_hashtag))
# img = preview_image(game)
img = pregame_image(game)
if args.discord:
if img is not None:
img_filename = os.path.join(PROJECT_ROOT, 'resources/images/Gameday-{}.png'.format(game.preferred_team.games + 1))
img.save(img_filename)
send_discord(CHANNEL_ID, preview_tweet_text, img_filename)
else:
send_discord(CHANNEL_ID, preview_tweet_text)
if args.notweets:
lineups = nhl_game_events.fantasy_lab_lines(game, game.preferred_team)
lineups_confirmed = lineups.get('confirmed')
officials = other_game_info.scouting_the_refs(game, game.preferred_team)
officials_confirmed = officials.get('confirmed')
goalies = other_game_info.dailyfaceoff_goalies(
preferred_team, other_team, pref_team_homeaway)
img.show()
logging.info("%s", preview_tweet_text)
if lineups_confirmed:
fwd_def_lines_tweet = lineups.get('fwd_def_lines_tweet')
power_play_lines_tweet = lineups.get('power_play_lines_tweet', 'N/A')
logging.info("%s", fwd_def_lines_tweet)
logging.info("%s", power_play_lines_tweet)
if args.discord:
send_discord(CHANNEL_ID, fwd_def_lines_tweet)
send_discord(CHANNEL_ID, power_play_lines_tweet)
if officials_confirmed:
logging.info("%s", officials.get('tweet'))
if args.discord:
send_discord(CHANNEL_ID, officials.get('tweet'))
pref_goalie_tweet_text = goalies.get('pref_goalie')
other_goalie_tweet_text = goalies.get('other_goalie')
pref_goalie_tweet = (f'Projected {game.game_hashtag} Goalie '
f'for {pref_hashtag}:\n{pref_goalie_tweet_text}')
other_goalie_tweet = (f'Projected {game.game_hashtag} Goalie '
f'for {other_hashtag}:\n{other_goalie_tweet_text}')
logging.info("%s", pref_goalie_tweet)
logging.info("%s", other_goalie_tweet)
logging.info("%s", season_series_tweet)
if args.discord:
send_discord(CHANNEL_ID, pref_goalie_tweet)
send_discord(CHANNEL_ID, other_goalie_tweet)
send_discord(CHANNEL_ID, season_series_tweet)
logging.info("Since we are not sending tweets, just sleep until game time.")
time.sleep(game.game_time_countdown)
else:
if img is not None:
img_filename = os.path.join(PROJECT_ROOT, 'resources/images/Gameday-{}.png'.format(game.preferred_team.games + 1))
img.save(img_filename)
api = get_api()
image_tweet = api.update_with_media(img_filename, preview_tweet_text)
image_tweet_id = image_tweet.id_str
game.pregame_lasttweet = image_tweet_id
else:
image_tweet_id = send_tweet(preview_tweet_text)
if args.discord:
send_discord(CHANNEL_ID, preview_tweet_text)
# Send Season Series tweet (only tweet not waiting on confirmation)
game.pregame_lasttweet = send_tweet(season_series_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, season_series_tweet)
while True:
if not game.pregametweets['goalies_pref'] or not game.pregametweets['goalies_other']:
goalie_confirm_list = ('Confirmed', 'Likely')
# Get Goalies from Daily Faceoff
goalies = other_game_info.dailyfaceoff_goalies(
preferred_team, other_team, pref_team_homeaway)
pref_goalie_tweet_text = goalies.get('pref_goalie')
other_goalie_tweet_text = goalies.get('other_goalie')
pref_goalie_confirm_text = goalies.get('pref_goalie_confirm')
other_goalie_confirm_text = goalies.get('other_goalie_confirm')
# Convert confirmations into True / False
pref_goalie_confirm = bool(pref_goalie_confirm_text in goalie_confirm_list)
other_goalie_confirm = bool(other_goalie_confirm_text in goalie_confirm_list)
if pref_goalie_confirm and not game.pregametweets['goalies_pref']:
pref_goalie_tweet = (f'Projected {game.game_hashtag} Goalie '
f'for {pref_hashtag}:\n{pref_goalie_tweet_text}')
game.pregame_lasttweet = send_tweet(pref_goalie_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, pref_goalie_tweet)
game.pregametweets['goalies_pref'] = True
else:
logging.info('Preferred team goalie not yet likely or confirmed.')
if other_goalie_confirm and not game.pregametweets['goalies_other']:
other_goalie_tweet = (f'Projected {game.game_hashtag} Goalie '
f'for {other_hashtag}:\n{other_goalie_tweet_text}')
game.pregame_lasttweet = send_tweet(other_goalie_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, other_goalie_tweet)
game.pregametweets['goalies_other'] = True
else:
logging.info('Other team goalie not yet likely or confirmed.')
# Get Fantasy Labs lineups (only if tweet not sent)
if not game.pregametweets['lines']:
lineups = nhl_game_events.fantasy_lab_lines(game, game.preferred_team)
lineups_confirmed = lineups['confirmed']
# Only send lineups tweet if confirmed
if lineups_confirmed:
fwd_def_lines_tweet = lineups.get('fwd_def_lines_tweet')
power_play_lines_tweet = lineups.get('power_play_lines_tweet')
game.pregame_lasttweet = send_tweet(fwd_def_lines_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, fwd_def_lines_tweet)
game.pregame_lasttweet = send_tweet(power_play_lines_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, power_play_lines_tweet)
game.pregametweets['lines'] = True
else:
logging.info('Lineup information not yet confirmed.')
# Get Officials via Scouting the Refs (if tweet not sent)
if not game.pregametweets['refs']:
officials = other_game_info.scouting_the_refs(game, game.preferred_team)
officials_confirmed = officials.get('confirmed')
# Only send officials tweet if confirmed
if officials_confirmed:
officials_tweet = officials.get('tweet')
game.pregame_lasttweet = send_tweet(officials_tweet, reply=game.pregame_lasttweet)
if args.discord:
send_discord(CHANNEL_ID, officials_tweet)
game.pregametweets['refs'] = True
else:
logging.info('Referee information not yet posted.')
# Check if all tweets are sent
all_pregametweets_sent = all(value is True for value in game.pregametweets.values())
logging.info("Pre-Game Tweets: %s", game.pregametweets)
logging.info("Pre-Game Tweets Flag: %s", all_pregametweets_sent)
if not all_pregametweets_sent and game.game_time_countdown > 1800:
logging.info("Game State is Preview & all pre-game tweets are not sent. "
"Sleep for 30 minutes & check again.")
time.sleep(1800)
elif not all_pregametweets_sent and game.game_time_countdown < 1800:
                logging.warning("Game State is Preview & all pre-game tweets are not sent. "
                                "Less than 30 minutes until game time so we skip these today. "
                                "If needed, we try to get lines at the end of the game for advanced stats.")
time.sleep(game.game_time_countdown)
break
else:
logging.info("Game State is Preview & all tweets are sent. "
"Sleep for %s seconds until game time.", game.game_time_countdown)
time.sleep(game.game_time_countdown)
break
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# MAIN PROGRAM FLOW
# ------------------------------------------------------------------------------
# This line is to prevent code from being executed, if ever imported.
if __name__ == '__main__':
args = parse_arguments()
# If running in Docker, parse environment variables (instead of arguments)
# And set args.console to True to make `docker logs` easier to use
if args.docker:
# Check to see if Time Zone is set
if "TZ" not in os.environ:
            print("[ERROR] Timezone environment variable not set, please add to `docker run` command.")
sys.exit()
if os.environ["TZ"] not in pytz.all_timezones:
            print(f"[ERROR] {os.environ['TZ']} is not a valid time zone, please fix in `docker run` command.")
sys.exit()
# Force console argument & parse the remainder of the environment variables
args.console = True
parse_env_variables(args)
# Standardize Twitter Tokens (if being used via Docker)
if not args.notweets:
try:
if args.debugtweets:
debug_consumer_key = os.environ["TWTR_CONSUMER_KEY"]
debug_consumer_secret = os.environ["TWTR_CONSUMER_SECRET"]
debug_access_token = os.environ["TWTR_ACCESS_TOKEN"]
debug_access_secret = os.environ["TWTR_ACCESS_SECRET"]
else:
consumer_key = os.environ["TWTR_CONSUMER_KEY"]
consumer_secret = os.environ["TWTR_CONSUMER_SECRET"]
access_token = os.environ["TWTR_ACCESS_TOKEN"]
access_secret = os.environ["TWTR_ACCESS_SECRET"]
except KeyError:
print("[ERROR] Twitter API keys are not set, please add to `docker run` command.")
sys.exit()
# Setup Logging for this script
setup_logging()
if args.docker and not args.notweets:
try:
TWITTER_ID = os.environ["TWTR_HANDLE"]
TWITTER_URL = f'https://twitter.com/{TWITTER_ID}/status/'
except KeyError:
print("[ERROR] Twitter handle is not set, please add to `docker run` command.")
sys.exit()
else:
if args.debugtweets:
TWITTER_ID = config['ENDPOINTS']['DEBUG_TWITTER_HANDLE']
TWITTER_URL = f'https://twitter.com/{TWITTER_ID}/status/'
# If --team is specified, override TEAM_BOT constant
if args.docker:
try:
TEAM_BOT = args.team
except KeyError:
print("[ERROR] NHL Team is not set, please add to `docker run` command.")
sys.exit()
else:
if args.team is not None:
TEAM_BOT = args.team
# ------------------------------------------------------------------------------
# SCRIPT STARTS PROCESSING BELOW
# ------------------------------------------------------------------------------
# Log script start lines
logging.info('#' * 80)
logging.info('New instance of the Hockey Twitter Bot started.')
if args.docker:
logging.info('Running in a Docker container - environment variables parsed.')
logging.info('TIME: %s', datetime.now())
logging.info('ARGS - notweets: %s, console: %s, teamoverride: %s',
args.notweets, args.console, args.team)
logging.info('ARGS - debug: %s, debugtweets: %s, overridelines: %s',
args.debug, args.debugtweets, args.overridelines)
logging.info('ARGS - date: %s, split: %s, localdata: %s, discord: %s',
args.date, args.split, args.localdata, args.discord)
logging.info("%s\n", "#" * 80)
# Create a requests object to maintain session
req_session = requests.Session()
# Starting Discord thread
logging.info('Starting Discord Thread')
start_discord_bot()
# send_discord(CHANNEL_ID, 'TEST')
logging.info('Discord Thread started')
# Check if there is a game scheduled for today
# If there is no game, exit the program
game_today, game_info = is_game_today(get_team(TEAM_BOT))
if not game_today:
if is_linode():
logging.info(
"No game scheduled for today - shutting down Linode & exiting script.")
linode_shutdown()
else:
logging.info("No game scheduled for today - exiting script.")
sys.exit()
# For debugging purposes, print all game_info
logging.debug("Game Information: %s", game_info)
# Create a Game Object
gameobj_game_id = game_info["gamePk"]
gameobj_game_season = game_info["season"]
gameobj_game_type = game_info["gameType"]
gameobj_date_time = game_info["gameDate"]
gameobj_game_state = game_info["status"]["abstractGameState"]
if args.localdata or args.yesterday:
gameobj_game_state = "Live"
# If venue is null for some reason, extract from home_team
try:
gameobj_venue = game_info["venue"]["name"]
except KeyError:
gameobj_venue = game_info["teams"]["home"]["team"]["venue"]["name"]
gameobj_live_feed = game_info["link"]
gameobj_broadcasts = {}
try:
broadcasts = game_info["broadcasts"]
for broadcast in broadcasts:
broadcast_team = broadcast["type"]
if broadcast_team == "national":
gameobj_broadcasts["away"] = broadcast["name"]
gameobj_broadcasts["home"] = broadcast["name"]
break
else:
broadcast_channel = broadcast["name"]
gameobj_broadcasts[broadcast_team] = broadcast_channel
except KeyError:
logging.warning("Broadcasts not available - setting them to TBD.")
        gameobj_broadcasts["away"] = "TBD"
        gameobj_broadcasts["home"] = "TBD"
# Create Team Objects
    # Note: The schedule endpoint calls the 3-character code 'abbreviation' - not 'triCode'
# TODO: Review record / games played for playoffs
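    # The first four digits of the gamePk are the season's starting year (e.g. 2018),
    # so the season string built below becomes e.g. '20182019'.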
team_objs_season_id = str(gameobj_game_id)[0:4]
team_objs_season = "{}{}".format(team_objs_season_id, int(team_objs_season_id) + 1)
awayteam_info = game_info["teams"]["away"]["team"]
awayteam_record = game_info["teams"]["away"]["leagueRecord"]
if gameobj_game_type == config['GAMETYPE']['PLAYOFFS'] or gameobj_game_type == config['GAMETYPE']['PRESEASON']:
awayteamobj_games = awayteam_record["wins"] + awayteam_record["losses"]
else:
awayteamobj_games = awayteam_record["wins"] + awayteam_record["losses"] + awayteam_record["ot"]
awayteamobj_name = awayteam_info["name"]
awayteamobj_id = awayteam_info["id"]
awayteamobj_shortname = awayteam_info["teamName"]
awayteamobj_tri = awayteam_info["abbreviation"]
try:
awayteamobj_tv = gameobj_broadcasts["away"]
except KeyError:
awayteamobj_tv = "N/A"
away_team_obj = nhl_game_events.Team(awayteamobj_id, awayteamobj_name, awayteamobj_shortname,
awayteamobj_tri, "away", awayteamobj_tv, awayteamobj_games,
awayteam_record, team_objs_season)
hometeam_info = game_info["teams"]["home"]["team"]
hometeam_record = game_info["teams"]["home"]["leagueRecord"]
if gameobj_game_type == config['GAMETYPE']['PLAYOFFS'] or gameobj_game_type == config['GAMETYPE']['PRESEASON']:
hometeamobj_games = hometeam_record["wins"] + hometeam_record["losses"]
else:
hometeamobj_games = hometeam_record["wins"] + hometeam_record["losses"] + hometeam_record["ot"]
hometeamobj_name = hometeam_info["name"]
hometeamobj_id = hometeam_info["id"]
hometeamobj_shortname = hometeam_info["teamName"]
hometeamobj_tri = hometeam_info["abbreviation"]
try:
hometeamobj_tv = gameobj_broadcasts["home"]
except KeyError:
hometeamobj_tv = "N/A"
home_team_obj = nhl_game_events.Team(hometeamobj_id, hometeamobj_name, hometeamobj_shortname,
hometeamobj_tri, "home", hometeamobj_tv, hometeamobj_games,
hometeam_record, team_objs_season)
# Check for Line Overrides
if args.overridelines:
home_team_obj.overridelines = True
away_team_obj.overridelines = True
# Set Preferred Team
home_team_obj.preferred = bool(home_team_obj.team_name == TEAM_BOT)
away_team_obj.preferred = bool(away_team_obj.team_name == TEAM_BOT)
preferred_indicator = "home" if home_team_obj.preferred else "away"
game_obj = nhl_game_events.Game(gameobj_game_id, gameobj_game_type, gameobj_date_time,
gameobj_game_state, gameobj_venue, home_team_obj,
away_team_obj, preferred_indicator, gameobj_live_feed,
gameobj_game_season)
# Get the gameday rosters (from the Live Feed)
# This is needed because in some instances a player is not included
# on the /teams/{id}/roster page for some reason
preferred_homeaway = game_obj.preferred_team.home_away
preferred_team = game_obj.preferred_team
other_team = game_obj.other_team
try:
logging.info("Getting Gameday Roster via API - %s", game_obj.live_feed)
all_players = req_session.get(game_obj.live_feed).json()
all_players = all_players.get('gameData').get('players')
for id, player in all_players.items():
team = player.get('currentTeam').get('name')
if team == preferred_team.team_name:
preferred_team.gameday_roster[id] = player
else:
other_team.gameday_roster[id] = player
except requests.exceptions.RequestException as e:
logging.error("Unable to get all players.")
logging.error(e)
# All objects are created, start the game loop
while True:
if game_obj.game_state == "Preview":
if game_obj.game_time_countdown > 0:
if args.debug:
show_all_objects()
game_preview(game_obj)
else:
logging.info(
"Past game time, but game status still 'Preview' - sleep for 30 seconds.")
get_game_events(game_obj)
time.sleep(30)
elif game_obj.game_state == "Live":
# Add try / except to avoid exits
try:
logging.info('-' * 80)
logging.info("Game is currently live - checking events after event Idx %s.",
game_obj.last_event_idx)
game_events = get_game_events(game_obj)
loop_game_events(game_events, game_obj)
logging.info("Sleeping for 5 seconds...")
time.sleep(5)
except Exception as e:
logging.error("Uncaught exception in live game loop - still sleep for 5 seconds.")
logging.error(e)
time.sleep(5)
elif game_obj.game_state == "Final":
logging.info("Game is 'Final' - increase sleep time to 10 seconds.")
game_events = get_game_events(game_obj)
script_done = parse_end_of_game(game_events, game_obj)
if not script_done:
time.sleep(10)
else:
if is_linode():
logging.info("Script is done - shutting down Linode & exiting script.")
linode_shutdown()
else:
logging.info("Script is done - exiting script.")
sys.exit()
|
googlesearch.py
|
'''
Created on May 5, 2017
@author: anthony
'''
# compact
import sys
if sys.version_info < (3,):
import urllib2
else:
import urllib
import urllib.request
import urllib.parse
import math
import re
from bs4 import BeautifulSoup
from threading import Thread
from collections import deque
from time import sleep
from unidecode import unidecode
class GoogleSearch:
    USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36"
SEARCH_URL = "https://google.com/search"
RESULT_SELECTOR = "h3.r a"
TOTAL_SELECTOR = "#resultStats"
RESULTS_PER_PAGE = 10
DEFAULT_HEADERS = [
('User-Agent', USER_AGENT),
("Accept-Language", "en-US,en;q=0.5"),
]
    def search(self, query, as_epq=None, as_oq=None, as_eq=None, filetype=None, site=None, as_qdr=None, intitle=None, inurl=None,
               as_rq=None, as_lq=None, lr=None, cr=0, hl=None, num_results=10, prefetch_pages=True, prefetch_threads=1):
'''
Query Google returning a SearchResult object.
Keyword arguments:
query: The search query (required)
        as_epq: Force the search to include the exact text (AND filter).
        as_oq: Force the search to include the text. More advanced version of the one above, using the OR filter.
        as_eq: Force the search to exclude the text. Results must NOT include any words in this string.
        filetype: Only return results with the given file extension. E.g. to get PDF files simply put filetype='pdf'.
        site: Force the search to include results from a given site, i.e. limits results to just the site you choose.
        as_qdr: Limit the search results to those from the given time period.
        intitle: Force the search to include the text in the title.
        inurl: Force the search to include the text in the URL.
        as_rq: Finds sites Google thinks are related to the URL you put in.
        as_lq: Finds sites that link to the URL you put in.
        lr: Limits the languages used to return results. Not hugely effective.
        cr: Limits the search results to pages/sites from certain locations. Default 0 for your home country.
            Change it to include results from another country.
        hl: Interface language.
        num_results: Maximum number of results to return. Use a lower number to get faster results. (Default value = 10)
        prefetch_pages: By default the result URLs are fetched eagerly when the search request is made. (Default value = True)
            Fetching can be deferred until SearchResult.getText() or getMarkup() are called by passing prefetch_pages=False.
        prefetch_threads: Only works when prefetch_pages=True. (Default value = 1)
            Fetches result pages in separate threads. For best results set to the maximum number your processor supports.
            If left at 1, it is reset to the number of processor cores.
Returns:
A SearchResult object
'''
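        # Minimal usage sketch (assumes network access and that Google's result markup
        # still matches RESULT_SELECTOR / TOTAL_SELECTOR):
        #   response = GoogleSearch().search("python threading", num_results=5)
        #   for r in response.results:
        #       print(r.title, r.url)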
searchResults = []
if prefetch_threads==1:
import psutil
prefetch_threads = psutil.cpu_count()
if sys.version_info < (3,):
            squery = urllib2.quote(query) + ("" if intitle==None else ("+&intitle%3A\"" + urllib2.quote(intitle) + "\"")) + \
                ("" if inurl==None else ("+&inurl%3A\"" + urllib2.quote(inurl) + "\"")) + ("" if site==None else ("&+site%3A\"" + urllib2.quote(site) + "\""))
else:
squery = urllib.parse.quote(query) + ("" if intitle==None else ("+&intitle%3A\"" + urllib.parse.quote(intitle) + "\"")) + \
("" if inurl==None else ("+&inurl%3A\"" + urllib.parse.quote(inurl) + "\"")) + ("" if site==None else ("&+site%3A\"" + urllib.parse.quote(site) + "\""))
pages = int(math.ceil(num_results / float(GoogleSearch.RESULTS_PER_PAGE)))
fetcher_threads = deque([])
        total = None
        for i in range(pages):
startp = i * GoogleSearch.RESULTS_PER_PAGE
if sys.version_info < (3,):
opener = urllib2.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
response = opener.open(GoogleSearch.SEARCH_URL + "?q="+ squery + ("" if as_epq==None else ("&as_epq=" + urllib2.quote(as_epq))) + \
("" if as_oq==None else ("&as_oq=" + urllib2.quote(as_oq))) + ("" if as_eq==None else ("&as_eq=" + urllib2.quote(as_eq))) + \
("" if filetype==None else ("&as_filetype=" + urllib2.quote(filetype))) + ("" if startp == 0 else ("&start=" + str(startp))) + \
("" if as_qdr==None else ("&as_qdr=" + urllib2.quote(as_qdr))) + ("" if as_rq==None else ("&as_rq=" + urllib2.quote(as_rq))) + \
("" if as_lq==None else ("&as_lq=" + urllib2.quote(as_lq))) + ("" if hl==None else ("&hl=" + urllib2.quote(hl))) + "&cr=" + str(cr))
else:
opener = urllib.request.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
response = opener.open(GoogleSearch.SEARCH_URL + "?q="+ squery + ("" if as_epq==None else ("&as_epq=" + urllib.parse.quote(as_epq))) + \
("" if as_oq==None else ("&as_oq=" + urllib.parse.quote(as_oq))) + ("" if as_eq==None else ("&as_eq=" + urllib.parse.quote(as_eq))) + \
("" if filetype==None else ("&as_filetype=" + urllib.parse.quote(filetype))) + ("" if startp == 0 else ("&start=" + str(startp))) + \
("" if as_qdr==None else ("&as_qdr=" + urllib.parse.quote(as_qdr))) + ("" if as_rq==None else ("&as_rq=" + urllib.parse.quote(as_rq))) + \
("" if as_lq==None else ("&as_lq=" + urllib.parse.quote(as_lq))) + ("" if hl==None else ("&hl=" + urllib.parse.quote(hl))) + "&cr=" + str(cr))
#response = opener.open(surl)
soup = BeautifulSoup(response.read(), "lxml")
response.close()
if total is None:
if sys.version_info < (3,):
totalText = soup.select(GoogleSearch.TOTAL_SELECTOR)[0].children.next().encode('utf-8')
total = int(re.sub("[', ]", "", re.search("(([0-9]+[', ])*[0-9]+)", totalText).group(1)))
                else:
                    totalText = soup.select(GoogleSearch.TOTAL_SELECTOR)[0].children.__next__().encode('utf-8')
                    r1 = re.search(b"(([0-9]+[',\. ])*[0-9]+)", totalText)
                    total = int(re.sub(b"[',\. ]", b"", r1.group(1)))
results = self.parseResults(soup.select(GoogleSearch.RESULT_SELECTOR))
if len(searchResults) + len(results) > num_results:
del results[num_results - len(searchResults):]
searchResults += results
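            # Eagerly fetch each result's page text in background threads, never keeping
            # more than prefetch_threads fetcher threads alive at any one time.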
if prefetch_pages:
for result in results:
while True:
running = 0
for thread in fetcher_threads:
if thread.is_alive():
running += 1
if running < prefetch_threads:
break
sleep(1)
fetcher_thread = Thread(target=result.getText)
fetcher_thread.start()
fetcher_threads.append(fetcher_thread)
for thread in fetcher_threads:
thread.join()
        return SearchResponse(searchResults, total)
    def parseResults(self, results):
        searchResults = []
        for result in results:
            url = result["href"]
title = result.text
searchResults.append(SearchResult(title, url))
return searchResults
class SearchResponse:
def __init__(self, results, total):
        self.results = results
        self.total = total
class SearchResult:
def __init__(self, title, url):
self.title = title
self.url = url
self.__text = None
self.__markup = None
def getText(self):
if self.__text is None:
soup = BeautifulSoup(self.getMarkup(), "lxml")
for junk in soup(["script", "style"]):
junk.extract()
self.__text = unidecode(soup.get_text())
return self.__text
def getMarkup(self):
if self.__markup is None:
if sys.version_info < (3,):
opener = urllib2.build_opener()
else:
opener = urllib.request.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
            response = opener.open(self.url)
self.__markup = response.read()
return self.__markup
def __str__(self):
return str(self.__dict__)
def __unicode__(self):
return unicode(self.__str__())
def __repr__(self):
return self.__str__()
if __name__ == "__main__":
search = GoogleSearch()
i=1
query = " ".join(sys.argv[1:])
if len(query) == 0:
query = "python"
count = 10
print ("Fetching first " + str(count) + " results for \"" + query + "\"...")
    response = search.search(query, num_results=count)
print ("TOTAL: " + str(response.total) + " RESULTS")
for result in response.results:
    print("RESULT #" + str(i) + ": " + (result._SearchResult__text if result._SearchResult__text is not None else "[None]") + "\n\n")
i+=1
|
thread_cpu_test.py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import threading, multiprocessing
def loop():
x = 0
while True:
x = x ^ 1
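# Spawn one CPU-bound busy-loop thread per core. Under CPython the GIL keeps these
# threads from running truly in parallel, so total CPU usage stays near a single core.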
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=loop)
t.start()
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BertClient', 'ConcurrentBertClient']
# in the future client version must match with server version
__version__ = '1.9.1'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding', 'tokens'])
class BertClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
output_fmt='ndarray', show_server_config=False,
identity=None, check_version=True, check_length=True,
check_token_info=True, ignore_all_checks=False,
timeout=-1):
""" A client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type check_length: bool
:type check_token_info: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
:param output_fmt: the output format of the sentence encodes, either in numpy array or python List[List[float]] (ndarray/list)
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check if server `max_seq_len` is less than the sentence length before sent
:param check_token_info: check if server can return tokenization
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.length_limit = 0
self.token_info_available = False
if not ignore_all_checks and (check_version or show_server_config or check_length or check_token_info):
s_status = self.server_status
if check_version and s_status['server_version'] != self.status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U bert-serving-server bert-serving-client"\n'
'or disable version-check by "BertClient(check_version=False)"' % (
s_status['server_version'], self.status['client_version']))
if check_length:
if s_status['max_seq_len'] is not None:
self.length_limit = int(s_status['max_seq_len'])
else:
self.length_limit = None
if check_token_info:
self.token_info_available = bool(s_status['show_tokens_to_client'])
if show_server_config:
self._print_dict(s_status, 'server config:')
def close(self):
"""
Gently close all connections of the client. If you are using BertClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg, msg_len=0):
self.request_id += 1
self.sender.send_multipart([self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len])
self.pending_request.add(self.request_id)
return self.request_id
def _recv(self, wait_for_req_id=None):
try:
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = int(response[-1])
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('tokens', ''))
@property
def status(self):
"""
Get the status of this BertClient instance
:rtype: dict[str, str]
:return: a dictionary contains the status of this BertClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
                    'no response from the server (with "timeout"=%d ms), please check the following: '
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary contains the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, texts, blocking=True, is_tokenized=False, show_tokens=False):
""" Encode a list of strings to a list of vectors
`texts` should be a list of strings, each of which represents a sentence.
If `is_tokenized` is set to True, then `texts` should be list[list[str]],
        the outer list represents sentences and each inner list represents the tokens in a sentence.
Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
# encode untokenized sentences
bc.encode(['First do it',
'then do it right',
'then do it better'])
# encode tokenized sentences
bc.encode([['First', 'do', 'it'],
['then', 'do', 'it', 'right'],
['then', 'do', 'it', 'better']], is_tokenized=True)
:type is_tokenized: bool
:type show_tokens: bool
:type blocking: bool
        :type timeout: int
:type texts: list[str] or list[list[str]]
        :param is_tokenized: whether the input texts are already tokenized
        :param show_tokens: whether to include the tokenization result from the server. If true, the return value of the function will be a tuple
        :param texts: list of sentences to be encoded. A larger list gives better efficiency.
        :param blocking: wait until the encoded result is returned from the server. If false, the call returns immediately.
:param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
:return: encoded sentence/token-level embeddings, rows correspond to sentences
:rtype: numpy.ndarray or list[list[float]]
"""
if is_tokenized:
self._check_input_lst_lst_str(texts)
else:
self._check_input_lst_str(texts)
if self.length_limit is None:
warnings.warn('server does not put a restriction on "max_seq_len", '
'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
'you can restrict the sequence length on the client side for better efficiency')
elif self.length_limit and not self._check_length(texts, self.length_limit, is_tokenized):
warnings.warn('some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
                          'as a consequence you may get less-accurate or truncated embeddings.\n'
'here is what you can do:\n'
                          '- disable the length-check by creating a new "BertClient(check_length=False)" '
'when you do not want to display this warning\n'
'- or, start a new server with a larger "max_seq_len"' % self.length_limit)
req_id = self._send(jsonapi.dumps(texts), len(texts))
if not blocking:
return None
r = self._recv_ndarray(req_id)
if self.token_info_available and show_tokens:
return r.embedding, r.tokens
elif not self.token_info_available and show_tokens:
warnings.warn('"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
'here is what you can do:\n'
'- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
'- or, use "encode(show_tokens=False)"')
return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
        Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
        R1 and R2, where R1 has 256 samples and R2 has 1 sample. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
""" Async encode batches from a generator
:param delay: delay in seconds and then run fetcher
:param batch_generator: a generator that yields list[str] or list[list[str]] (for `is_tokenized=True`) every time
:param max_num_batch: stop after encoding this number of batches
:param `**kwargs`: the rest parameters please refer to `encode()`
:return: a generator that yields encoded vectors in ndarray, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
def run():
cnt = 0
for texts in batch_generator:
self.encode(texts, blocking=False, **kwargs)
cnt += 1
if max_num_batch and cnt == max_num_batch:
break
t = threading.Thread(target=run)
t.start()
return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit, tokenized):
if tokenized:
# texts is already tokenized as list of str
return all(len(t) <= len_limit for t in texts)
else:
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _check_input_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"%s" must be %s, but received %s' % (texts, type([]), type(texts)))
if not len(texts):
raise ValueError(
'"%s" must be a non-empty list, but received %s with %d elements' % (texts, type(texts), len(texts)))
for idx, s in enumerate(texts):
if not isinstance(s, _str):
raise TypeError('all elements in the list must be %s, but element %d is %s' % (type(''), idx, type(s)))
if not s.strip():
raise ValueError(
'all elements in the list must be non-empty string, but element %d is %s' % (idx, repr(s)))
if _py2:
texts[idx] = _unicode(texts[idx])
@staticmethod
def _check_input_lst_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"texts" must be %s, but received %s' % (type([]), type(texts)))
if not len(texts):
raise ValueError(
'"texts" must be a non-empty list, but received %s with %d elements' % (type(texts), len(texts)))
for s in texts:
BertClient._check_input_lst_str(s)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class BCManager():
def __init__(self, available_bc):
self.available_bc = available_bc
self.bc = None
def __enter__(self):
self.bc = self.available_bc.pop()
return self.bc
def __exit__(self, *args):
self.available_bc.append(self.bc)
class ConcurrentBertClient(BertClient):
def __init__(self, max_concurrency=10, **kwargs):
""" A thread-safe client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
:type max_concurrency: int
:param max_concurrency: the maximum number of concurrent connections allowed
"""
try:
from bert_serving.client import BertClient
except ImportError:
            raise ImportError('BertClient module is not available, it is required for serving HTTP requests. '
                              'Please use "pip install -U bert-serving-client" to install it. '
'If you do not want to use it as an HTTP server, '
'then remove "-http_port" from the command line.')
self.available_bc = [BertClient(**kwargs) for _ in range(max_concurrency)]
self.max_concurrency = max_concurrency
def close(self):
for bc in self.available_bc:
bc.close()
def _concurrent(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
try:
with BCManager(self.available_bc) as bc:
f = getattr(bc, func.__name__)
r = f if isinstance(f, dict) else f(*args, **kwargs)
return r
except IndexError:
                raise RuntimeError('Too many concurrent connections! '
'Try to increase the value of "max_concurrency", '
'currently =%d' % self.max_concurrency)
return arg_wrapper
@_concurrent
def encode(self, **kwargs):
pass
@property
@_concurrent
def server_status(self):
pass
@property
@_concurrent
def status(self):
pass
def fetch(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def fetch_all(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def encode_async(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
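# A minimal usage sketch of the non-blocking encode()/fetch_all() pattern
# documented above. It assumes a BertServer is already reachable on the default
# ports; the function name and sample sentences are illustrative only.
def _example_nonblocking_usage():
    sentences = ['First do it', 'then do it right', 'then do it better']
    with BertClient(ignore_all_checks=True) as bc:
        # fire several requests without waiting for the results
        for _ in range(3):
            bc.encode(sentences, blocking=False)
        # collect everything afterwards; sort=True restores the sending order
        return bc.fetch_all(sort=True, concat=True)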
|
taskManager.py
|
# BSD 2-Clause License
#
# Copyright (c) 2021-2022, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from subprocess import PIPE
from threading import RLock, Thread
import psutil
from ...error import LauncherError
from ...log import get_logger
from .util.shell import execute_async_cmd, execute_cmd
from ..utils.helpers import check_dev_log_level
logger = get_logger(__name__)
verbose_tm = check_dev_log_level()
TM_INTERVAL = 1
class TaskManager:
"""The Task Manager watches the subprocesses launched through
    the asynchronous shell interface. Each task is a wrapper
    around the Popen/Process instance.
    The Task Manager polls processes every TM_INTERVAL seconds
    and detects job failure and completion. Upon termination, the
    task returncode, output, and error are added to the task history.
    When a launcher uses the task manager to start a task, the task
    is either managed (by a WLM) or unmanaged (meaning not managed by
    a WLM). In the latter case, the Task Manager is responsible for the
lifecycle of the process.
"""
def __init__(self):
"""Initialize a task manager thread."""
self.actively_monitoring = False
self.task_history = dict()
self.tasks = []
self._lock = RLock()
def start(self):
"""Start the task manager thread
The TaskManager is run as a daemon thread meaning
that it will die when the main thread dies.
"""
monitor = Thread(name="TaskManager", daemon=True, target=self.run)
monitor.start()
def run(self):
"""Start monitoring Tasks"""
global verbose_tm
if verbose_tm:
logger.debug("Starting Task Manager")
self.actively_monitoring = True
while self.actively_monitoring:
time.sleep(TM_INTERVAL)
for task in self.tasks:
returncode = task.check_status() # poll and set returncode
# has to be != None because returncode can be 0
if returncode is not None:
output, error = task.get_io()
self.add_task_history(task.pid, returncode, output, error)
self.remove_task(task.pid)
if len(self) == 0:
self.actively_monitoring = False
if verbose_tm:
logger.debug("Sleeping, no tasks to monitor")
def start_task(self, cmd_list, cwd, env=None, out=PIPE, err=PIPE):
"""Start a task managed by the TaskManager
This is an "unmanaged" task, meaning it is NOT managed
by a workload manager
:param cmd_list: command to run
:type cmd_list: list[str]
:param cwd: current working directory
:type cwd: str
:param env: environment to launch with
:type env: dict[str, str], optional
:param out: output file, defaults to PIPE
:type out: file, optional
:param err: error file, defaults to PIPE
:type err: file, optional
:return: task id
:rtype: int
"""
self._lock.acquire()
try:
proc = execute_async_cmd(cmd_list, cwd, env=env, out=out, err=err)
task = Task(proc)
if verbose_tm:
logger.debug(f"Starting Task {task.pid}")
self.tasks.append(task)
self.task_history[task.pid] = (None, None, None)
return task.pid
finally:
self._lock.release()
def start_and_wait(self, cmd_list, cwd, env=None, timeout=None):
"""Start a task not managed by the TaskManager
This method is used by launchers to launch managed tasks
meaning that they ARE managed by a WLM.
This is primarily used for batch job launches
:param cmd_list: command to run
:type cmd_list: list[str]
:param cwd: current working directory
:type cwd: str
:param env: environment to launch with
:type env: dict[str, str], optional
:param timeout: time to wait, defaults to None
:type timeout: int, optional
:return: returncode, output, and err
:rtype: int, str, str
"""
returncode, out, err = execute_cmd(cmd_list, cwd=cwd, env=env, timeout=timeout)
if verbose_tm:
logger.debug("Ran and waited on task")
return returncode, out, err
def add_existing(self, task_id):
"""Add existing task to be managed by the TaskManager
:param task_id: task id of existing task
:type task_id: int
:raises LauncherError: If task cannot be found
"""
self._lock.acquire()
try:
process = psutil.Process(pid=task_id)
task = Task(process)
self.tasks.append(task)
self.task_history[task.pid] = (None, None, None)
except (psutil.NoSuchProcess, psutil.AccessDenied):
raise LauncherError(f"Process provided {task_id} does not exist") from None
finally:
self._lock.release()
def remove_task(self, task_id):
"""Remove a task from the TaskManager
:param task_id: id of the task to remove
:type task_id: str
"""
self._lock.acquire()
if verbose_tm:
logger.debug(f"Removing Task {task_id}")
try:
task = self[task_id]
if task.is_alive:
task.kill()
returncode = task.check_status()
out, err = task.get_io()
self.add_task_history(task_id, returncode, out, err)
self.tasks.remove(task)
except psutil.NoSuchProcess:
logger.debug("Failed to kill a task during removal")
except KeyError:
logger.debug("Failed to remove a task, task was already removed")
finally:
self._lock.release()
def get_task_update(self, task_id):
"""Get the update of a task
:param task_id: task id
:type task_id: str
:return: status, returncode, output, error
:rtype: str, int, str, str
"""
self._lock.acquire()
try:
rc, out, err = self.task_history[task_id]
            # has to be "is None" (not a truthiness check) because rc can be 0
            if rc is None:
try:
task = self[task_id]
return task.status, rc, out, err
# removed forcefully either by OS or us, no returncode set
# either way, job has completed and we won't have returncode
            # Usually hits when jobs last less than the TM_INTERVAL
except (KeyError, psutil.NoSuchProcess):
return "Completed", rc, out, err
# process has completed, status set manually as we don't
# save task statuses during runtime.
else:
if rc != 0:
return "Failed", rc, out, err
return "Completed", rc, out, err
finally:
self._lock.release()
def add_task_history(self, task_id, returncode, out=None, err=None):
"""Add a task to the task history
Add a task to record its future returncode, output and error
:param task_id: id of the task
:type task_id: str
:param returncode: returncode
:type returncode: int
:param out: output, defaults to None
:type out: str, optional
        :param err: error output, defaults to None
:type err: str, optional
"""
self.task_history[task_id] = (returncode, out, err)
def __getitem__(self, task_id):
self._lock.acquire()
try:
for task in self.tasks:
if task.pid == task_id:
return task
raise KeyError
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self.tasks)
finally:
self._lock.release()
class Task:
def __init__(self, process):
"""Initialize a task
:param process: Popen object
:type process: psutil.Popen
"""
self.process = process
self.pid = str(self.process.pid)
def check_status(self):
"""Ping the job and return the returncode if finished
:return: returncode if finished otherwise None
:rtype: int
"""
if self.owned:
return self.process.poll()
        # we can't manage processes we don't own
# have to rely on .kill() to stop.
return self.returncode
def get_io(self):
"""Get the IO from the subprocess
:return: output and error from the Popen
:rtype: str, str
"""
# Process class does not implement communicate
if not self.owned:
return None, None
output, error = self.process.communicate()
if output:
output = output.decode("utf-8")
if error:
error = error.decode("utf-8")
return output, error
def kill(self, timeout=10):
"""Kill the subprocess and all children"""
def kill_callback(proc):
logger.debug(f"Process terminated with kill {proc.pid}")
children = self.process.children(recursive=True)
children.append(self.process) # add parent process to be killed
for child in children:
child.kill()
_, alive = psutil.wait_procs(children, timeout=timeout, callback=kill_callback)
if alive:
for proc in alive:
logger.warning(f"Unable to kill emitted process {proc.pid}")
def terminate(self, timeout=10):
"""Terminate a this process and all children.
:param timeout: time to wait for task death, defaults to 10
:type timeout: int, optional
"""
def terminate_callback(proc):
logger.debug(f"Cleanly terminated task {proc.pid}")
children = self.process.children(recursive=True)
children.append(self.process) # add parent process to be killed
# try SIGTERM first for clean exit
for child in children:
if verbose_tm:
logger.debug(child)
child.terminate()
# wait for termination
_, alive = psutil.wait_procs(
children, timeout=timeout, callback=terminate_callback
)
if alive:
logger.debug(f"SIGTERM failed, using SIGKILL")
self.process.kill()
def wait(self):
self.process.wait()
@property
def returncode(self):
if self.owned:
return self.process.returncode
if self.is_alive:
return None
return 0
@property
def is_alive(self):
return self.process.is_running()
@property
def status(self):
return self.process.status()
@property
def owned(self):
if isinstance(self.process, psutil.Popen):
return True
return False
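# A minimal usage sketch of the lifecycle described in the TaskManager docstring:
# launch an unmanaged task, start the monitoring thread, then poll for its result.
# The command, cwd, and function name are illustrative only.
def _example_task_manager_usage():
    tm = TaskManager()
    task_id = tm.start_task(["sleep", "2"], cwd=".")
    tm.start()  # daemon thread that polls tasks every TM_INTERVAL seconds
    while True:
        status, returncode, out, err = tm.get_task_update(task_id)
        if returncode is not None:
            return status, returncode, out, err
        time.sleep(TM_INTERVAL)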
|
message_server.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
import Queue
import logging
import threading
import time
import tornado.ioloop
import tornado.web
MESSAGE_SERVER = None
BLANK_PAGE = """<html>
<head>
<title>Blank</title>
<style type="text/css">body {background-color: #FFF;}</style>
</head>
<body>
</body>
</html>"""
ORANGE_PAGE = """<html>
<head>
<title>Orange</title>
<style>
body {background-color: white; margin: 0;}
#o {width:100%; height: 100%; background-color: #DE640D;}
</style>
<script>
window.addEventListener('beforeunload', function() {
var o = document.getElementById('o')
o.parentNode.removeChild(o);
});
</script>
</head>
<body><div id='o'></div></body>
</html>"""
class TornadoRequestHandler(tornado.web.RequestHandler):
"""Request handler for when we are using tornado"""
def get(self):
"""Handle GET requests"""
import ujson as json
logging.debug(self.request.uri)
response = None
content_type = 'text/plain'
if self.request.uri == '/ping':
response = 'pong'
elif self.request.uri == '/blank.html':
content_type = 'text/html'
response = BLANK_PAGE
elif self.request.uri == '/orange.html':
content_type = 'text/html'
response = ORANGE_PAGE
elif self.request.uri == '/config':
# JSON config data
content_type = 'application/json'
response = '{}'
if MESSAGE_SERVER.config is not None:
response = json.dumps(MESSAGE_SERVER.config)
elif self.request.uri == '/config.html':
# HTML page that can be queried from the extension for config data
content_type = 'text/html'
response = "<html><head>\n"
if MESSAGE_SERVER.config is not None:
import cgi
response += '<div id="wptagentConfig" style="display: none;">'
response += cgi.escape(json.dumps(MESSAGE_SERVER.config))
response += '</div>'
response += "</head><body></body></html>"
if response is not None:
self.set_status(200)
self.set_header("Content-Type", content_type)
self.write(response)
def post(self):
"""Handle POST messages"""
import ujson as json
try:
messages = self.request.body
if messages is not None and len(messages):
if self.request.uri == '/log':
logging.debug(messages)
else:
for line in messages.splitlines():
line = line.strip()
if len(line):
message = json.loads(line)
if 'body' not in message and self.request.uri != '/etw':
message['body'] = None
MESSAGE_SERVER.handle_message(message)
except Exception:
pass
self.set_status(200)
class MessageServer(object):
"""Local HTTP server for interacting with the extension"""
def __init__(self):
global MESSAGE_SERVER
MESSAGE_SERVER = self
self.thread = None
self.messages = Queue.Queue()
self.config = None
self.__is_started = threading.Event()
def get_message(self, timeout):
"""Get a single message from the queue"""
message = self.messages.get(block=True, timeout=timeout)
self.messages.task_done()
return message
def flush_messages(self):
"""Flush all of the pending messages"""
try:
while True:
self.messages.get_nowait()
self.messages.task_done()
except Exception:
pass
def handle_message(self, message):
"""Add a received message to the queue"""
self.messages.put(message)
def start(self):
"""Start running the server in a background thread"""
self.__is_started.clear()
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
self.__is_started.wait(timeout=30)
def stop(self):
"""Stop running the server"""
logging.debug("Shutting down extension server")
self.must_exit = True
if self.thread is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(ioloop.stop)
self.thread.join()
self.thread = None
logging.debug("Extension server stopped")
def is_ok(self):
"""Check that the server is responding and restart it if necessary"""
import requests
import monotonic
end_time = monotonic.monotonic() + 30
server_ok = False
while not server_ok and monotonic.monotonic() < end_time:
try:
response = requests.get('http://127.0.0.1:8888/ping', timeout=10)
if response.text == 'pong':
server_ok = True
except Exception:
pass
if not server_ok:
time.sleep(5)
return server_ok
def run(self):
"""Main server loop"""
logging.debug('Starting extension server on port 8888')
application = tornado.web.Application([(r"/.*", TornadoRequestHandler)])
application.listen(8888, '127.0.0.1')
self.__is_started.set()
tornado.ioloop.IOLoop.instance().start()
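# A minimal usage sketch: start the background Tornado server, verify it answers
# /ping, drain one message posted by the extension, then shut it down. The
# 5-second receive timeout and the function name are illustrative only.
def _example_message_server_usage():
    server = MessageServer()
    server.start()
    if server.is_ok():
        try:
            message = server.get_message(timeout=5)
            logging.debug("received: %s", message)
        except Queue.Empty:
            logging.debug("no message arrived within the timeout")
    server.stop()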
|
hypothesis_test.py
|
import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import threading
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
if workspace.has_gpu_support:
# NOTE: During GPU stress tests, the number of workers exceeds the number
# of GPUs which results in flakiness from GPU contention. As a
# result, deadlines are not enforced on CUDA runs.
_hypothesis_settings = settings
def settings(**kwargs):
if 'deadline' in kwargs:
kwargs['deadline'] = None
kwargs.setdefault('max_examples', 50)
def wrapped(f):
return _hypothesis_settings(**kwargs)(f)
return wrapped
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
assume(len(dims_) + len(extra_) < max_dim)
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=20, deadline=None)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, deadline=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
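# The factories above return Hypothesis-driven test functions; a test case runs
# one by calling it with itself, as test_comparison_ops() does below, e.g.
# (illustrative only):
#
#     def test_add_like(self):
#         _test_binary("Add", lambda x, y: (x + y,), gcs=hu.gcs_cpu_only)(self)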
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 30) - 1
return True
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Add", ref, filter_=not_overflow)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 15) - 1
return True
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref, filter_=not_overflow)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 1e-2
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
@settings(deadline=1000)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
@settings(deadline=1000)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
@settings(deadline=10000)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
@settings(deadline=10000)
def test_random_seed_behaviour(self, device_options, set_seed):
# Assume we are always operating on CUDA or CPU, since RNG is
# inconsistent between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
@settings(deadline=10000)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@settings(deadline=None)
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.booleans(),
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=hu.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
#there's a bug in miopen for N=1 which would be resolved in the next release.
if workspace.has_hip_support:
assume(N>1)
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
# set device option
if workspace.has_hip_support:
device_option = hu.hip_do
engine = 'MIOPEN'
else:
device_option = hu.gpu_do
engine = 'CUDNN'
input_weight_size = hidden_size * D
upper_layer_input_weight_size = hidden_size * hidden_size
if bidirectional:
upper_layer_input_weight_size *= 2
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
first_layer_sz = input_weight_size + recurrent_weight_size + \
input_bias_size + recurrent_bias_size
upper_layer_sz = upper_layer_input_weight_size + \
recurrent_weight_size + input_bias_size + \
recurrent_bias_size
total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=device_option)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine=engine)
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=device_option)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
device_option, op, inputs, input_idx, [0],
stepsize=0.01, threshold=0.01)
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
add_axis=st.integers(0, 1),
num_inputs=st.integers(2, 4), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
if add_axis == 0:
# Sets a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis, add_axis=add_axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat(*inputs):
inputs = list(inputs)
if add_axis:
for i in range(len(inputs)):
inputs[i] = np.expand_dims(inputs[i], axis)
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat)
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
@settings(deadline=10000)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
# Sets a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat_with_order(*inputs):
inputs = list(inputs)
axis = order[1]
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
@given(X=hu.arrays(dims=[5, 2],
elements=hu.floats(
min_value=1.0,
max_value=10.0)
),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
@settings(deadline=1000)
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=hu.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=hu.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
@settings(deadline=10000)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=hu.floats(min_value=0.1, max_value=0.9),
momentum=hu.floats(min_value=0.1, max_value=0.9),
lr=hu.floats(min_value=0.1, max_value=0.9),
epsilon=hu.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
@settings(deadline=10000)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=1000)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
# Reference
@staticmethod
def _dense_gftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
old_shape = g.shape
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
output_dim = g.shape[0]
w = w.reshape(output_dim, -1)
g = g.reshape(output_dim, -1)
n = n.reshape(output_dim, -1)
z = z.reshape(output_dim, -1)
input_dim = g.shape[1]
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
z_norms = np.linalg.norm(z, 2, axis=0)
z_norms = z_norms + 1e-6
w = z * ((lambda1 * np.sqrt(output_dim)) / z_norms - 1) / \
((beta + np.sqrt(n)) / alpha + lambda2)
for i in range(input_dim):
if z_norms[i] <= lambda1 * np.sqrt(output_dim):
w[:, i] = 0
w = w.reshape(old_shape)
n = n.reshape(old_shape)
z = z.reshape(old_shape)
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_gftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"GFtrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_gftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=hu.floats(min_value=0.01, max_value=0.1),
beta=hu.floats(min_value=0.1, max_value=0.9),
lambda1=hu.floats(min_value=0.001, max_value=0.1),
lambda2=hu.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
lambda1, lambda2, engine, gc,
dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad", "alpha"],
["var", "nz"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad, alpha], [0])
# Reference
def ftrl(w, nz, i, g, alpha):
sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
lambda2, w[i], nz[i],
g, alpha)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs_no_hip)
@settings(deadline=10000)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
@settings(deadline=1000)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
        if top_k > 1:
gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
pred_sorted = sorted(
([item, j] for j, item in enumerate(prediction[i])),
key=lambda x: x[0],
reverse=True
)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
@settings(deadline=1000)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(
lengths=st.lists(
st.integers(min_value=0, max_value=10), min_size=0, max_size=10
),
include_last_offset=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=None)
def test_lengths_to_offsets(self, lengths, include_last_offset, gc, dc):
op = core.CreateOperator(
"LengthsToOffsets",
["lengths"],
["ranges"],
include_last_offset=include_last_offset,
)
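        # Reference: offsets are the exclusive prefix sum of the lengths,
        # e.g. lengths [2, 3, 1] -> [0, 2, 5], with the total (6) appended
        # when include_last_offset is set.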
def op_ref(x):
if not x.size:
arr = [x.reshape(0)]
else:
arr = [np.concatenate(([0], np.cumsum(x)[:-1]))]
if include_last_offset:
arr[0] = np.concatenate((arr[0], np.array([np.sum(x)])))
return tuple(arr)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref,
)
@given(prediction=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
@settings(deadline=10000)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
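        # Reference: per-class accuracy, i.e. for each class the fraction of
        # its examples whose argmax prediction equals the label, together
        # with the per-class example counts.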
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
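        # The reference below inverts lengths_to_ids, inserting zeros for
        # segment ids that never appear,
        # e.g. ids [0, 0, 2, 2, 2] -> lengths [2, 0, 3].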
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
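        # Reference: every element of a segment of length l gets weight
        # 1 / l**power, e.g. power=1 and lengths [2] -> weights [0.5, 0.5].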
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
@settings(deadline=10000)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=-10,
max_value=10)),
**hu.gcs)
@settings(deadline=10000)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=-10,
max_value=10)),
**hu.gcs)
@settings(deadline=1000)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
@settings(deadline=10000)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=hu.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
def test_blobs_dequeue_timeout(self):
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=5,
num_blobs=1)
self.ws.run(op)
t = time.time()
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
["out"],
timeout_secs=0.2)
self.assertRaises(RuntimeError, lambda: self.ws.run(op))
t = time.time() - t
self.assertGreater(t, 0.19)
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
@settings(deadline=10000)
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
- Verify that the output matrices are permutation of the rows of the
original matrices.
"""
import threading
try:
import queue
except ImportError:
            # Python 2 fallback (the stdlib module was named Queue there)
import Queue as queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
@settings(deadline=None, max_examples=50)
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
                     for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
@settings(deadline=None, max_examples=50)
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
        # Create multiple producer nets and one producer exit net
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exit net
consumer_net = core.Net('weight_sample_dequeue_net')
table_idx_blob = np.random.randint(low=-1, high=num_blobs, size=1)
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,)),
table_idx_blob=table_idx_blob[0],
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_tt_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
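        # The TT (tensor-train) layer factors the 16x16 fully connected
        # weight matrix into a chain of four small cores with the ranks
        # above; inp_sizes and out_sizes factor the input and output
        # dimensions (2*2*2*2 = 16 each).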
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(**hu.gcs_cpu_only)
def test_tt_sls_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
factor_voc = [10, 10, 10]
factor_width = [2, 2, 2]
op = core.CreateOperator(
"TTSparseLengthsSum",
["core0", "core1", "core2", "index", "lengths"],
["Y", "core0_output", "core1_output", "indices"],
factor_i=factor_voc,
factor_j=factor_width,
ranks=[1, 16, 16, 1],
emb_size=8
)
c0 = np.ones([10, 1, 2, 16]).astype(np.float32)
c1 = np.ones([10, 16, 2, 16]).astype(np.float32)
c2 = np.ones([10, 16, 2, 1]).astype(np.float32)
# index = np.array([0, 1, 2, 1, 4], dtype=np.int)
# lengths = np.array([3, 2], dtype=np.int)
index = np.array([0, 1, 2, 1, 4], np.int64)
lengths = np.array([3, 2], np.int32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("index").feed(index)
self.ws.create_blob("lengths").feed(lengths)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
self.assertEqual(list(Y.shape), [2, 8])
golden = np.array([[768, 768, 768, 768, 768, 768, 768, 768],
[512, 512, 512, 512, 512, 512, 512, 512]])
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=0)
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([10, 1, 4, 16]).astype(np.float32)
c1 = np.ones([10, 16, 4, 16]).astype(np.float32)
c2 = np.ones([10, 16, 4, 1]).astype(np.float32)
lengths = np.array([3, 2], np.int32)
c0_out = np.ones([5, 4, 16]).astype(np.float32)
c1_out = np.ones([5, 16, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([2, 64]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop1(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([101, 1, 2, 16]).astype(np.float32)
c1 = np.ones([102, 16, 2, 16]).astype(np.float32)
c2 = np.ones([153, 16, 4, 1]).astype(np.float32)
lengths = np.array([3, 2], np.int32)
c0_out = np.ones([5, 2, 16]).astype(np.float32)
c1_out = np.ones([5, 4, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([2, 16]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
@given(**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_tt_sls(self, gc, dc):
factor_voc = [10, 10, 10]
factor_width = [2, 2, 2]
op = core.CreateOperator(
"TTSparseLengthsSum",
["core0", "core1", "core2", "index", "lengths"],
["Y", "core0_output", "core1_output", "indices"],
factor_i=factor_voc,
factor_j=factor_width,
ranks=[1, 16, 16, 1],
emb_size=8
)
c0 = np.ones([10, 1, 2, 16]).astype(np.float32)
c1 = np.ones([10, 16, 2, 16]).astype(np.float32)
c2 = np.ones([10, 16, 2, 1]).astype(np.float32)
index = np.array([0, 1, 2, 1, 4], np.int64)
lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
self.assertGradientChecks(gc, op, [c0, c1, c2, index, lengths], 0, [0])
@given(**hu.gcs_cpu_only)
def test_tt_sls_repro(self, gc, dc):
factor_voc = [125, 160, 200]
factor_width = [4, 4, 4]
op = core.CreateOperator(
"TTSparseLengthsSum",
["core0", "core1", "core2", "index", "lengths"],
["Y", "core0_output", "core1_output", "indices"],
factor_i=factor_voc,
factor_j=factor_width,
ranks=[1, 16, 16, 1],
emb_size=64
)
c0 = np.ones([125, 1, 4, 16]).astype(np.float32)
c1 = np.ones([160, 16, 4, 16]).astype(np.float32)
c2 = np.ones([200, 16, 4, 1]).astype(np.float32)
index = np.array([0, 4000000 - 1, 20000, 1000000, 4000000 - 1], np.int64)
lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("index").feed(index)
self.ws.create_blob("lengths").feed(lengths)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
self.assertEqual(list(Y.shape), [7, 64])
golden = np.array([[0] * 64, [768] * 64, [0] * 64, [0] * 64, [512] * 64, [0] * 64, [0] * 64])
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=0)
@given(**hu.gcs_cpu_only)
def test_tt_sls_gradientop2(self, gc, dc):
op = core.CreateOperator(
"TTSparseLengthsSumGradient",
["core0", "core1", "core2", "lengths",
"core0_out", "core1_out", "indices", "dY"],
["dCore0", "dCore1", "dCore2"]
)
c0 = np.ones([101, 1, 2, 16]).astype(np.float32)
c1 = np.ones([102, 16, 2, 16]).astype(np.float32)
c2 = np.ones([153, 16, 4, 1]).astype(np.float32)
lengths = np.array([0, 3, 0, 0, 2, 0, 0], np.int32)
c0_out = np.ones([5, 2, 16]).astype(np.float32)
c1_out = np.ones([5, 4, 16]).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[1, 0, 0],
[4, 0, 0]], np.int64)
dY = np.ones([7, 16]).astype(np.float32)
self.ws.create_blob("core0").feed(c0)
self.ws.create_blob("core1").feed(c1)
self.ws.create_blob("core2").feed(c2)
self.ws.create_blob("lengths").feed(lengths)
self.ws.create_blob("core0_out").feed(c0_out)
self.ws.create_blob("core1_out").feed(c1_out)
self.ws.create_blob("indices").feed(indices)
self.ws.create_blob("dY").feed(dY)
self.ws.run(op)
dCore0 = self.ws.blobs[("dCore0")].fetch()
dCore1 = self.ws.blobs[("dCore1")].fetch()
dCore2 = self.ws.blobs[("dCore2")].fetch()
self.assertEqual(list(dCore0.shape), list(c0.shape))
self.assertEqual(list(dCore1.shape), list(c1.shape))
self.assertEqual(list(dCore2.shape), list(c2.shape))
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
**hu.gcs)
@settings(deadline=10000)
def test_dag_net_forking(self, net_type, num_workers, gc, dc):
from caffe2.python.model_helper import ModelHelper
from caffe2.python import brew
m = ModelHelper(name="test_model")
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
brew.fc(
m,
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
brew.fc(
m,
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(gc)
m.param_init_net.Proto().device_option.CopyFrom(gc)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=gc)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=gc)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
@settings(deadline=None, max_examples=50)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
        if is_empty:
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
self.assertGradientChecks(gc, op, [input, start_vec, end_vec], 0, [0])
@given(data=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_shape_with_axes(self, data, gc, dc):
def shape_ref(x, y):
return ([x.shape[i] for i in y],)
axes = np.random.randint(len(data.shape), size=10).tolist()
op = core.CreateOperator("Shape", ["data"], ["shape"], axes=axes)
self.assertReferenceChecks(gc, op, [data, axes], shape_ref)
@given(x=hu.tensor(), y=hu.tensor(), **hu.gcs_cpu_only)
@settings(deadline=1000)
def test_has_elements(self, x, y, gc, dc):
op = core.CreateOperator("HasElements", ["x", "y"], ["has_elements"])
self.assertReferenceChecks(gc, op, [x, y], lambda x, y: (len(x) > 0 or len(y) > 0, ))
op = core.CreateOperator("IsEmpty", ["x"], ["is_empty"])
self.assertReferenceChecks(gc, op, [x], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
@settings(deadline=10000)
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
@settings(deadline=10000)
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
@settings(deadline=None, max_examples=50)
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
stats_net = core.Net("stats_net")
stats_net.StatRegistryExport([], ["stats_key", "stats_val", "stats_ts"])
self.ws.run(init_net)
self.ws.run(plan)
self.ws.run(stats_net)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
if num_iters * num_nets > 0:
stats_key = self.ws.blobs[("stats_key")].fetch()
atomic_iter_key = b'atomic_iter/stats/iter/num_iter'
self.assertTrue(atomic_iter_key in stats_key)
stat_val = self.ws.blobs[("stats_val")].fetch()
self.assertEqual(num_iters * num_nets, stat_val[list(stats_key).index(atomic_iter_key)])
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
@settings(deadline=1000)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = caffe2_pb2.TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=hu.floats(min_value=1e-4, max_value=1e-2),
a_grad=hu.tensor(elements=hu.floats(min_value=0.01, max_value=0.99)),
eps_grad=hu.floats(min_value=1e-4, max_value=1e-3),
**hu.gcs)
@settings(deadline=10000)
def test_logit(self, a, eps, a_grad, eps_grad, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
        # Forward testing is carried out over the full input range to keep
        # the original test coverage. The gradient test uses a reduced input
        # range because the logit curve rises sharply near 0 and 1, where the
        # numerical error increases dramatically and would fail the check;
        # we therefore only run the gradient test on (0.01, 0.99).
        # Very occasionally the test may still fail due to accumulated
        # random error; narrowing the range to (0.02, 0.98) would further
        # improve stability.
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
op_grad = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps_grad)
self.assertGradientChecks(gc, op_grad, [a_grad], 0, [0],
threshold=0.04, stepsize=2e-3)
@given(a=hu.tensor(elements=hu.floats(allow_nan=True)),
value=hu.floats(min_value=-10, max_value=10),
**hu.gcs)
@settings(deadline=1000)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
@settings(deadline=10000)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
**hu.gcs)
@settings(deadline=1000)
def test_constant_fill_from_tensor(self, data, gc, dc):
dtype = data.dtype.type
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = np.array([data.item(0)], dtype=dtype)
inputs = [data, value]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
op = core.CreateOperator(
'ConstantFill',
["X", "V"],
["Y"],
dtype=enum_type,
)
def ref(x, v):
outputs = np.full(shape=data.shape, fill_value=value[0], dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
@settings(deadline=10000)
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import model_helper, brew
np.random.seed(1701)
step_net = model_helper.ModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
brew.fc(step_net,
"hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {
str(k): str(v) for k, v in viewitems(backward_mapping)
}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=[
inputs.index(i) for i in recurrent_inputs
],
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=[inputs.index(p) for p in step_net.params],
step_net=step_net.Proto(),
backward_step_net=backward_step_net.Proto(),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
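        # SpaceToBatch moves spatial blocks into the batch dimension: the
        # padded height and width shrink by a factor of block_size while the
        # batch grows by block_size**2 (the inverse of BatchToSpace, tested
        # below).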
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@settings(suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=hu.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
@settings(deadline=10000)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=hu.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=hu.elements_of_type(dt), dtype=dt)),
**hu.gcs)
@settings(deadline=10000)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
        if X.dtype != np.dtype('float32') and gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
            # CUDA only supports 32-bit float
            print("Bailout {}".format(X.dtype))
            return
        if gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
            # The CUDA version only supports int32 indices
I = I.astype(np.int32)
if X.dtype in (np.dtype('int64'), np.dtype('int32')):
assume((np.abs(X.ravel()).max() < np.iinfo('int32').max).all())
assume(np.abs(X.ravel()).astype(np.int64).sum() < np.iinfo('int32').max)
# values don't matter
D = np.zeros((first_dim,) + X.shape[1:]).astype(X.dtype)
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
op_noshapeinfer = core.CreateOperator("SparseToDense", ["I", "X"], ["Y"])
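        # Reference: scatter-add the rows of X into a dense output at the
        # positions given by I; repeated indices accumulate. The variant
        # without the shape input D sizes the first dimension as max(I) + 1.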
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape, dtype=X.dtype)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
def sparse_to_dense_noshapeinfer(I, X):
O = np.zeros((np.max(I) + 1,) + X.shape[1:], dtype=X.dtype)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
self.assertReferenceChecks(gc, op_noshapeinfer, [I, X], sparse_to_dense_noshapeinfer)
if X.dtype == np.float32:
self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=hu.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=hu.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
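        # Reference: values below lower_bound fall into bucket 0, values at
        # or above upper_bound into bucket num_buckets + 1, and the interval
        # in between is split into num_buckets equal-width buckets.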
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
@settings(max_examples=1, deadline=None)
@given(
queue_capacity=st.integers(2, 2),
time_sleep=st.integers(5, 10),
num_blobs_to_equeue=st.integers(1, 1),
num_blobs_to_dequeue=st.integers(2, 2),
)
def test_safe_dequeue_blob__raises_exception_when_hang(
self,
queue_capacity,
time_sleep,
num_blobs_to_equeue,
num_blobs_to_dequeue,
):
r"""
Tests SafeDequeueBlobsOp being cancellable.
Create a queue with the number of BlobsQueue less than the number
SafeDequeueBlobs to cause the hanging behavior when running the Net.
Then call cancel from the previous sleeping thread to ensure exception
is raised.
"""
def _net_instance_cancel(net_instance):
time.sleep(time_sleep)
net_instance.cancel()
init_net = core.Net("init_net")
init_net.Proto().type = "async_scheduling"
queue = init_net.CreateBlobsQueue(
[],
"queue_name",
capacity=queue_capacity,
num_blobs=num_blobs_to_equeue,
)
ws = workspace.Workspace()
ws.create_net(init_net).run()
net = core.Net("net")
net.Proto().type = "async_scheduling"
blobs = net.SafeDequeueBlobs([queue], num_blobs_to_dequeue)
net_instance = ws.create_net(net)
t = threading.Thread(target=_net_instance_cancel, args=[net_instance])
t.start()
with self.assertRaises(Exception):
net_instance.run()
t.join()
if __name__ == "__main__":
unittest.main()
|
__init__.py
|
#####################################################################
# #
# /__init__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division
import itertools
import os
import sys
import random
import time
import subprocess
import types
import threading
import traceback
import labscript_utils.h5_lock
import h5py
import numpy as np
import zprocess
__version__ = '2.0.5-dev'
if not sys.version < '3':
unicode = str
def is_valid_python_identifier(name):
import tokenize
import StringIO
try:
tokens = list(tokenize.generate_tokens(StringIO.StringIO(name).readline))
except tokenize.TokenError:
return False
if len(tokens) == 2:
(token_type, _, _, _, _), _ = tokens
return token_type == tokenize.NAME
return False
class ExpansionError(Exception):
"""An exception class so that error handling code can tell when a
parsing exception was caused by a mismatch with the expansion mode"""
pass
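# Dictionary subclass that records which keys are looked up between
# start_trace() and stop_trace(); presumably used to track which other
# globals an expression reads while it is being evaluated (e.g. to build the
# dependency hierarchy returned by evaluate_globals).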
class TraceDictionary(dict):
def __init__(self, *args, **kwargs):
self.trace_data = None
dict.__init__(self, *args, **kwargs)
def start_trace(self):
self.trace_data = []
def __getitem__(self, key):
if self.trace_data is not None:
if key not in self.trace_data:
self.trace_data.append(key)
return dict.__getitem__(self, key)
def stop_trace(self):
trace_data = self.trace_data
self.trace_data = None
return trace_data
def new_globals_file(filename):
with h5py.File(filename, 'w') as f:
f.create_group('globals')
def add_expansion_groups(filename):
"""backward compatability, for globals files which don't have
expansion groups. Create them if they don't exist. Guess expansion
settings based on datatypes, if possible."""
# DEPRECATED
# Don't open in write mode unless we have to:
with h5py.File(filename, 'r') as f:
requires_expansion_group = []
for groupname in f['globals']:
group = f['globals'][groupname]
if not 'expansion' in group:
requires_expansion_group.append(groupname)
if requires_expansion_group:
group_globalslists = [get_globalslist(filename, groupname) for groupname in requires_expansion_group]
with h5py.File(filename, 'a') as f:
for groupname, globalslist in zip(requires_expansion_group, group_globalslists):
group = f['globals'][groupname]
subgroup = group.create_group('expansion')
# Initialise all expansion settings to blank strings:
for name in globalslist:
subgroup.attrs[name] = ''
groups = {group_name: filename for group_name in get_grouplist(filename)}
sequence_globals = get_globals(groups)
evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals, raise_exceptions=False)
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
value = evaled_globals[group_name][global_name]
expansion = guess_expansion_type(value)
set_expansion(filename, group_name, global_name, expansion)
def get_grouplist(filename):
    # For backward compatibility, add 'expansion' settings to this
    # globals file if it doesn't contain any, guessing expansion settings
    # where possible.
# DEPRECATED
add_expansion_groups(filename)
with h5py.File(filename, 'r') as f:
grouplist = f['globals']
# File closes after this function call, so have to
# convert the grouplist generator to a list of strings
# before its file gets dereferenced:
return list(grouplist)
def new_group(filename, groupname):
with h5py.File(filename, 'a') as f:
if groupname in f['globals']:
raise Exception('Can\'t create group: target name already exists.')
group = f['globals'].create_group(groupname)
group.create_group('units')
group.create_group('expansion')
def rename_group(filename, oldgroupname, newgroupname):
if oldgroupname == newgroupname:
# No rename!
return
with h5py.File(filename, 'a') as f:
if newgroupname in f['globals']:
raise Exception('Can\'t rename group: target name already exists.')
f.copy(f['globals'][oldgroupname], '/globals/%s' % newgroupname)
del f['globals'][oldgroupname]
def delete_group(filename, groupname):
with h5py.File(filename, 'a') as f:
del f['globals'][groupname]
def get_globalslist(filename, groupname):
with h5py.File(filename, 'r') as f:
group = f['globals'][groupname]
# File closes after this function call, so have to convert
# the attrs to a dict before its file gets dereferenced:
return dict(group.attrs)
def new_global(filename, groupname, globalname):
if not is_valid_python_identifier(globalname):
raise ValueError('%s is not a valid Python variable name'%globalname)
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
if globalname in group.attrs:
raise Exception('Can\'t create global: target name already exists.')
group.attrs[globalname] = ''
f['globals'][groupname]['units'].attrs[globalname] = ''
f['globals'][groupname]['expansion'].attrs[globalname] = ''
def rename_global(filename, groupname, oldglobalname, newglobalname):
if oldglobalname == newglobalname:
# No rename!
return
if not is_valid_python_identifier(newglobalname):
raise ValueError('%s is not a valid Python variable name'%newglobalname)
value = get_value(filename, groupname, oldglobalname)
units = get_units(filename, groupname, oldglobalname)
expansion = get_expansion(filename, groupname, oldglobalname)
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
if newglobalname in group.attrs:
raise Exception('Can\'t rename global: target name already exists.')
group.attrs[newglobalname] = value
group['units'].attrs[newglobalname] = units
group['expansion'].attrs[newglobalname] = expansion
del group.attrs[oldglobalname]
del group['units'].attrs[oldglobalname]
del group['expansion'].attrs[oldglobalname]
def get_value(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = unicode(value)
return value
def set_value(filename, groupname, globalname, value):
with h5py.File(filename, 'a') as f:
f['globals'][groupname].attrs[globalname] = value
def get_units(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname]['units'].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = unicode(value)
return value
def set_units(filename, groupname, globalname, units):
with h5py.File(filename, 'a') as f:
f['globals'][groupname]['units'].attrs[globalname] = units
def get_expansion(filename, groupname, globalname):
with h5py.File(filename, 'r') as f:
value = f['globals'][groupname]['expansion'].attrs[globalname]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = unicode(value)
return value
def set_expansion(filename, groupname, globalname, expansion):
with h5py.File(filename, 'a') as f:
f['globals'][groupname]['expansion'].attrs[globalname] = expansion
def delete_global(filename, groupname, globalname):
with h5py.File(filename, 'a') as f:
group = f['globals'][groupname]
del group.attrs[globalname]
def guess_expansion_type(value):
if isinstance(value, np.ndarray) or isinstance(value, list):
return u'outer'
else:
return u''
def iterator_to_tuple(iterator, max_length=1000000):
# We want to prevent infinite length tuples, but we cannot know
# whether they are infinite or not in advance. So we'll convert to
# a tuple only if the length is less than max_length:
temp_list = []
for i, element in enumerate(iterator):
temp_list.append(element)
if i == max_length:
raise ValueError('This iterator is very long, possibly infinite. ' +
'Runmanager cannot create an infinite number of shots. ' +
'If you really want an iterator longer than %d, ' % max_length +
'please modify runmanager.iterator_to_tuple and increase max_length.')
return tuple(temp_list)
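# Example (sketch): iterator_to_tuple is what lets globals be written as
# generator expressions. A generator like (x**2 for x in range(4)) is
# converted to the concrete tuple (0, 1, 4, 9), while an effectively endless
# generator such as itertools.count() hits max_length and raises ValueError.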
def get_all_groups(h5_files):
"""returns a dictionary of group_name: h5_path pairs from a list of h5_files."""
if isinstance(h5_files, str) or isinstance(h5_files, unicode):
h5_files = [h5_files]
groups = {}
for path in h5_files:
for group_name in get_grouplist(path):
if group_name in groups:
raise ValueError('Error: group %s is defined in both %s and %s. ' % (group_name, groups[group_name], path) +
'Only uniquely named groups can be used together '
'to make a run file.')
groups[group_name] = path
return groups
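# Example (sketch, hypothetical file names): given two globals files that
# define the groups 'mot' and 'imaging' respectively,
#   get_all_groups(['mot.h5', 'imaging.h5'])
# returns {'mot': 'mot.h5', 'imaging': 'imaging.h5'}, and raises ValueError
# if the same group name appears in both files.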
def get_globals(groups):
"""Takes a dictionary of group_name: h5_file pairs and pulls the
globals out of the groups in their files. The globals are strings
storing python expressions at this point. All these globals are
packed into a new dictionary, keyed by group_name, where the values
are dictionaries which look like {global_name: (expression, units, expansion), ...}"""
# get a list of filepaths:
filepaths = set(groups.values())
sequence_globals = {}
for filepath in filepaths:
groups_from_this_file = [g for g, f in groups.items() if f == filepath]
with h5py.File(filepath, 'r') as f:
for group_name in groups_from_this_file:
sequence_globals[group_name] = {}
try:
globals_group = f['globals'][group_name]
for global_name in globals_group.attrs:
value = globals_group.attrs[global_name]
units = globals_group['units'].attrs[global_name]
expansion = globals_group['expansion'].attrs[global_name]
# Replace numpy strings with python unicode strings.
# DEPRECATED, for backward compat with old files
value = unicode(value)
units = unicode(units)
expansion = unicode(expansion)
sequence_globals[group_name][global_name] = value, units, expansion
                except Exception:
                    print(group_name)
return sequence_globals
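# Example of the returned structure (sketch, hypothetical names and values):
#   {'mot': {'mot_detuning': ('-2*gamma', 'MHz', ''),
#            'n_shots':      ('range(10)', '',    'outer')}}
# i.e. each global is stored as its unevaluated (expression, units, expansion)
# triple, grouped under its group name.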
def evaluate_globals(sequence_globals, raise_exceptions=True):
"""Takes a dictionary of globals as returned by get_globals. These
globals are unevaluated strings. Evaluates them all in the same
namespace so that the expressions can refer to each other. Iterates
to allow for NameErrors to be resolved by subsequently defined
globals. Throws an exception if this does not result in all errors
going away. The exception contains the messages of all exceptions
which failed to be resolved. If raise_exceptions is False, any
evaluations resulting in an exception will instead return the
exception object in the results dictionary"""
# Flatten all the groups into one dictionary of {global_name:
# expression} pairs. Also create the group structure of the results
# dict, which has the same structure as sequence_globals:
all_globals = {}
results = {}
expansions = {}
global_hierarchy = {}
# Pre-fill the results dictionary with groups, this is needed for
# storing exceptions in the case of globals with the same name being
# defined in multiple groups (all of them get the exception):
for group_name in sequence_globals:
results[group_name] = {}
multiply_defined_globals = set()
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
if global_name in all_globals:
# The same global is defined twice. Either raise an
# exception, or store the exception for each place it is
# defined, depending on whether raise_exceptions is True:
groups_with_same_global = []
for other_group_name in sequence_globals:
if global_name in sequence_globals[other_group_name]:
groups_with_same_global.append(other_group_name)
exception = ValueError('Global named \'%s\' is defined in multiple active groups:\n ' % global_name +
'\n '.join(groups_with_same_global))
if raise_exceptions:
raise exception
for other_group_name in groups_with_same_global:
results[other_group_name][global_name] = exception
multiply_defined_globals.add(global_name)
all_globals[global_name], units, expansion = sequence_globals[group_name][global_name]
expansions[global_name] = expansion
# Do not attempt to evaluate globals which are multiply defined:
for global_name in multiply_defined_globals:
del all_globals[global_name]
# Eval the expressions in the same namespace as each other:
evaled_globals = {}
# we use a "TraceDictionary" to track which globals another global depends on
sandbox = TraceDictionary()
exec('from pylab import *', sandbox, sandbox)
exec('from runmanager.functions import *', sandbox, sandbox)
exec('try: from mise import MiseParameter\nexcept: pass', sandbox, sandbox)
globals_to_eval = all_globals.copy()
previous_errors = -1
while globals_to_eval:
errors = []
for global_name, expression in globals_to_eval.copy().items():
# start the trace to determine which globals this global depends on
sandbox.start_trace()
try:
value = eval(expression, sandbox)
# Need to know the length of any generators, convert to tuple:
if isinstance(value, types.GeneratorType):
value = iterator_to_tuple(value)
# Make sure if we're zipping or outer-producting this value, that it can
# be iterated over:
if expansions[global_name] == 'outer':
try:
iter(value)
except Exception as e:
raise ExpansionError(str(e))
except Exception as e:
# Don't raise, just append the error to a list, we'll display them all later.
errors.append((global_name, e))
sandbox.stop_trace()
continue
# Put the global into the namespace so other globals can use it:
sandbox[global_name] = value
del globals_to_eval[global_name]
evaled_globals[global_name] = value
# get the results from the global trace
trace_data = sandbox.stop_trace()
# Only store names of globals (not other functions)
for key in list(trace_data): # copy the list before iterating over it
if key not in all_globals:
trace_data.remove(key)
if trace_data:
global_hierarchy[global_name] = trace_data
if len(errors) == previous_errors:
# Since some globals may refer to others, we expect maybe
            # some NameErrors to have occurred. There should be fewer
# NameErrors each iteration of this while loop, as globals
# that are required become defined. If there are not fewer
# errors, then there is something else wrong and we should
# raise it.
if raise_exceptions:
message = 'Error parsing globals:\n'
for global_name, exception in errors:
message += '%s: %s: %s\n' % (global_name, exception.__class__.__name__, exception.message)
raise Exception(message)
else:
for global_name, exception in errors:
evaled_globals[global_name] = exception
break
previous_errors = len(errors)
# Assemble results into a dictionary of the same format as sequence_globals:
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
# Do not attempt to override exception objects already stored
# as the result of multiply defined globals:
if not global_name in results[group_name]:
results[group_name][global_name] = evaled_globals[global_name]
return results, global_hierarchy, expansions
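# Example (sketch, hypothetical globals): with a single group containing
#   x = '10'   and   y = '2*x'
# y may be attempted before x and fail with a NameError on the first pass;
# the loop then retries and succeeds once x is in the sandbox. Afterwards
# global_hierarchy records that y was evaluated using x (e.g. {'y': ['x']}).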
def expand_globals(sequence_globals, evaled_globals):
"""Expands iterable globals according to their expansion
settings. Creates a number of 'axes' which are to be outer product'ed
together. Some of these axes have only one element, these are globals
that do not vary. Some have a set of globals being zipped together,
iterating in lock-step. Others contain a single global varying
across its values (the globals set to 'outer' expansion). Returns
a list of shots, each element of which is a dictionary for that
shot's globals."""
values = {}
expansions = {}
for group_name in sequence_globals:
for global_name in sequence_globals[group_name]:
expression, units, expansion = sequence_globals[group_name][global_name]
value = evaled_globals[group_name][global_name]
values[global_name] = value
expansions[global_name] = expansion
# Get a list of the zip keys in use:
zip_keys = set(expansions.values())
try:
zip_keys.remove('outer')
except KeyError:
pass
axes = []
global_names = []
for zip_key in zip_keys:
axis = []
for global_name in expansions:
if expansions[global_name] == zip_key:
value = values[global_name]
if not zip_key:
# Wrap up non-iterating globals (with zip_key = '') in a
# one-element list. When zipped and then outer product'ed,
# this will give us the result we want:
value = [value]
axis.append(value)
global_names.append(global_name)
axis = zip(*axis)
axes.append(axis)
# Give each global being outer-product'ed its own axis. It gets
# wrapped up in a list and zipped with itself so that it is in the
# same format as the zipped globals, ready for outer-producting
# together:
for global_name in expansions:
if expansions[global_name] == 'outer':
value = values[global_name]
axis = [value]
axis = zip(*axis)
axes.append(axis)
global_names.append(global_name)
shots = []
for axis_values in itertools.product(*axes):
# values here is a tuple of tuples, with the outer list being over
# the axes. We need to flatten it to get our individual values out
# for each global, since we no longer care what axis they are on:
global_values = [value for axis in axis_values for value in axis]
shot_globals = dict(zip(global_names, global_values))
shots.append(shot_globals)
return shots
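# Example (sketch, hypothetical globals): with
#   x -> [1, 2] and expansion 'outer'
#   y -> 5      and expansion ''
# expand_globals produces two shots: [{'x': 1, 'y': 5}, {'x': 2, 'y': 5}].
# Two globals sharing the same non-empty zip key would instead iterate in
# lock-step along a single axis before being outer-producted with the rest.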
def generate_sequence_id(scriptname):
"""Our convention for generating sequence ids. Just a timestamp and
the name of the labscript that the run file is to be compiled with."""
timestamp = time.strftime('%Y%m%dT%H%M%S', time.localtime())
scriptbase = os.path.basename(scriptname).split('.py')[0]
return timestamp + '_' + scriptbase
def make_run_files(output_folder, sequence_globals, shots, sequence_id, shuffle=False):
"""Does what it says. sequence_globals and shots are of the datatypes
returned by get_globals and get_shots, one is a nested dictionary with
string values, and the other a flat dictionary. sequence_id should
be some identifier unique to this sequence, use generate_sequence_id
to follow convention. shuffle will randomise the order that the run
files are generated in with respect to which element of shots they
come from. This function returns a *generator*. The run files are
not actually created until you loop over this generator (which gives
you the filepaths). This is useful for not having to clean up as many
unused files in the event of failed compilation of labscripts. If you
want all the run files to be created at some point, simply convert
    the returned generator to a list. The filenames the run files are
    given are simply the sequence_id with increasing integers appended."""
basename = os.path.join(output_folder, sequence_id)
nruns = len(shots)
ndigits = int(np.ceil(np.log10(nruns)))
if shuffle:
random.shuffle(shots)
for i, shot_globals in enumerate(shots):
runfilename = ('%s_%0' + str(ndigits) + 'd.h5') % (basename, i)
make_single_run_file(runfilename, sequence_globals, shot_globals, sequence_id, i, nruns)
yield runfilename
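# Example usage (sketch): the generator is typically consumed one file at a
# time so that compilation failures don't leave a pile of unused run files:
#   run_files = make_run_files(output_folder, sequence_globals, shots, sequence_id)
#   for path in run_files:
#       returncode, stdout, stderr = compile_labscript(labscript_file, path)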
def make_single_run_file(filename, sequenceglobals, runglobals, sequence_id, run_no, n_runs):
"""Does what it says. runglobals is a dict of this run's globals,
the format being the same as that of one element of the list returned
by expand_globals. sequence_globals is a nested dictionary of the
type returned by get_globals. Every run file needs a sequence ID,
generate one with generate_sequence_id. This doesn't have to match
    the filename of the run file you end up using, though it usually does
(exceptions being things like connection tables). run_no and n_runs
must be provided, if this run file is part of a sequence, then they
should reflect how many run files are being generated which share
this sequence_id."""
with h5py.File(filename, 'w') as f:
f.attrs['sequence_id'] = sequence_id
f.attrs['run number'] = run_no
f.attrs['n_runs'] = n_runs
f.create_group('globals')
if sequenceglobals is not None:
for groupname, groupvars in sequenceglobals.items():
group = f['globals'].create_group(groupname)
unitsgroup = group.create_group('units')
expansiongroup = group.create_group('expansion')
for name, (value, units, expansion) in groupvars.items():
group.attrs[name] = value
unitsgroup.attrs[name] = units
expansiongroup.attrs[name] = expansion
for name, value in runglobals.items():
if value is None:
# Store it as a null object reference:
value = h5py.Reference()
try:
f['globals'].attrs[name] = value
except Exception as e:
message = ('Global %s cannot be saved as an hdf5 attribute. ' % name +
'Globals can only have relatively simple datatypes, with no nested structures. ' +
'Original error was:\n' +
'%s: %s' % (e.__class__.__name__, e.message))
raise ValueError(message)
def make_run_file_from_globals_files(labscript_file, globals_files, output_path):
"""Creates a run file output_path, using all the globals from
globals_files. Uses labscript_file only to generate a sequence ID"""
groups = get_all_groups(globals_files)
sequence_globals = get_globals(groups)
evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals)
shots = expand_globals(sequence_globals, evaled_globals)
    if len(shots) > 1:
        # evaled_globals is nested by group name, so walk both levels when
        # collecting the names of globals that expand into a sequence:
        scanning_globals = []
        for group_name in evaled_globals:
            for global_name in evaled_globals[group_name]:
                if expansions.get(global_name):
                    scanning_globals.append(global_name)
        raise ValueError('Cannot compile to a single run file: The following globals are a sequence: ' +
                         ' '.join(scanning_globals))
sequence_id = generate_sequence_id(labscript_file)
make_single_run_file(output_path, sequence_globals, shots[0], sequence_id, 1, 1)
def compile_labscript(labscript_file, run_file):
"""Compiles labscript_file with the run file, returning
the processes return code, stdout and stderr."""
proc = subprocess.Popen([sys.executable, labscript_file, run_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return proc.returncode, stdout, stderr
def compile_labscript_with_globals_files(labscript_file, globals_files, output_path):
"""Creates a run file output_path, using all the globals from
globals_files. Compiles labscript_file with the run file, returning
the processes return code, stdout and stderr."""
make_run_file_from_globals_files(labscript_file, globals_files, output_path)
returncode, stdout, stderr = compile_labscript(labscript_file, output_path)
return returncode, stdout, stderr
def compile_labscript_async(labscript_file, run_file, stream_port, done_callback):
"""Compiles labscript_file with run_file. This function is designed
to be called in a thread. The stdout and stderr from the compilation
will be shoveled into stream_port via zmq push as it spews forth, and
when compilation is complete, done_callback will be called with a
boolean argument indicating success."""
compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')
to_child, from_child, child = zprocess.subprocess_with_queues(compiler_path, stream_port)
to_child.put(['compile', [labscript_file, run_file]])
while True:
signal, data = from_child.get()
if signal == 'done':
success = data
to_child.put(['quit', None])
child.communicate()
done_callback(success)
break
else:
raise RuntimeError((signal, data))
def compile_multishot_async(labscript_file, run_files, stream_port, done_callback):
"""Compiles labscript_file with run_files. This function is designed
to be called in a thread. The stdout and stderr from the compilation
will be shoveled into stream_port via zmq push as it spews forth,
and when each compilation is complete, done_callback will be called
with a boolean argument indicating success. Compilation will stop
after the first failure."""
compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')
to_child, from_child, child = zprocess.subprocess_with_queues(compiler_path, stream_port)
try:
for run_file in run_files:
to_child.put(['compile', [labscript_file, run_file]])
while True:
signal, data = from_child.get()
if signal == 'done':
success = data
done_callback(data)
break
if not success:
break
except Exception:
error = traceback.format_exc()
zprocess.zmq_push_multipart(stream_port, data=['stderr', error])
to_child.put(['quit', None])
child.communicate()
raise
to_child.put(['quit', None])
child.communicate()
def compile_labscript_with_globals_files_async(labscript_file, globals_files, output_path, stream_port, done_callback):
"""Same as compile_labscript_with_globals_files, except it launches
a thread to do the work and does not return anything. Instead,
stderr and stdout will be put to stream_port via zmq push in
the multipart message format ['stdout','hello, world\n'] etc. When
    compilation is finished, the function done_callback will be called
    with a boolean argument indicating success or failure."""
try:
make_run_file_from_globals_files(labscript_file, globals_files, output_path)
thread = threading.Thread(
target=compile_labscript_async, args=[labscript_file, output_path, stream_port, done_callback])
thread.daemon = True
thread.start()
except Exception:
error = traceback.format_exc()
zprocess.zmq_push_multipart(stream_port, data=['stderr', error])
t = threading.Thread(target=done_callback, args=(False,))
t.daemon = True
t.start()
def get_shot_globals(filepath):
"""Returns the evaluated globals for a shot, for use by labscript or lyse.
Simple dictionary access as in dict(h5py.File(filepath).attrs) would be fine
except we want to apply some hacks, so it's best to do that in one place."""
params = {}
with h5py.File(filepath) as f:
for name, value in f['globals'].attrs.items():
# Convert numpy bools to normal bools:
if isinstance(value, np.bool_):
value = bool(value)
# Convert null HDF references to None:
if isinstance(value, h5py.Reference) and not value:
value = None
# Convert numpy strings to Python ones.
# DEPRECATED, for backward compat with old files.
if isinstance(value, np.str_):
value = str(value)
params[name] = value
return params
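# Example usage (sketch, hypothetical shot file and global name): reading the
# evaluated globals back out of a compiled shot, with the hacks above applied:
#   params = get_shot_globals('20150501T120000_example_00.h5')
#   print(params['mot_detuning'])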
def dict_diff(dict1, dict2):
"""Return the difference between two dictionaries as a dictionary of key: [val1, val2] pairs.
Keys unique to either dictionary are included as key: [val1, '-'] or key: ['-', val2]."""
diff_keys = []
common_keys = np.intersect1d(dict1.keys(), dict2.keys())
for key in common_keys:
if np.iterable(dict1[key]) or np.iterable(dict2[key]):
if not np.array_equal(dict1[key], dict2[key]):
diff_keys.append(key)
else:
if dict1[key] != dict2[key]:
diff_keys.append(key)
dict1_unique = [key for key in dict1.keys() if key not in common_keys]
dict2_unique = [key for key in dict2.keys() if key not in common_keys]
diff = {}
for key in diff_keys:
diff[key] = [dict1[key], dict2[key]]
for key in dict1_unique:
diff[key] = [dict1[key], '-']
for key in dict2_unique:
diff[key] = ['-', dict2[key]]
return diff
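# Example (sketch): dict_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4})
# returns {'b': [2, 3], 'c': ['-', 4]} -- common keys with equal values are
# omitted, and keys unique to one dictionary get '-' on the other side.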
|
main_train.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
import time
import pickle5 as pickle
from collections import defaultdict
from tqdm import tqdm
from tensorboardX import SummaryWriter, GlobalSummaryWriter
import torch.multiprocessing as mp
import math
from smore.models import build_model
from smore.common.util import flatten_query, list2tuple, parse_time, set_global_seed, eval_tuple, construct_graph, tuple2filterlist, maybe_download_dataset
from smore.common.config import parse_args, all_tasks, query_name_dict, name_query_dict
from smore.common.embedding.embed_optimizer import get_optim_class
from smore.cpp_sampler.sampler_clib import KGMem
from smore.training.train_process import async_aggr, train_mp
from smore.evaluation.dataloader import MultihopTestDataset, Test1pDataset, Test1pBatchDataset
from collections import namedtuple
QueryData = namedtuple('QueryData', ['data', 'buffer', 'writer_buffer'])
def setup_train_mode(args):
tasks = args.tasks.split('.')
if args.training_tasks is None:
args.training_tasks = args.tasks
training_tasks = args.training_tasks.split('.')
if args.online_sample:
if eval_tuple(args.online_sample_mode)[3] == 'wstruct':
normalized_structure_prob = np.array(eval_tuple(args.online_weighted_structure_prob)).astype(np.float32)
normalized_structure_prob /= np.sum(normalized_structure_prob)
normalized_structure_prob = normalized_structure_prob.tolist()
assert len(normalized_structure_prob) == len(training_tasks)
else:
normalized_structure_prob = [1/len(training_tasks)] * len(training_tasks)
args.normalized_structure_prob = normalized_structure_prob
train_dataset_mode, sync_steps, sparse_embeddings, async_optim, merge_mode = eval_tuple(args.train_online_mode)
update_mode, optimizer_name, optimizer_device, squeeze_flag, queue_size = eval_tuple(args.optim_mode)
assert train_dataset_mode in ['single'], "mix has been deprecated"
assert update_mode in ['aggr'], "fast has been deprecated"
assert optimizer_name in ['adagrad', 'rmsprop', 'adam']
args.sync_steps = sync_steps
args.async_optim = async_optim
args.merge_mode = merge_mode
args.sparse_embeddings = sparse_embeddings
args.sparse_device = optimizer_device
args.train_dataset_mode = train_dataset_mode
def setup_save_path(args):
cur_time = parse_time()
if args.prefix is None:
prefix = 'logs'
else:
prefix = args.prefix
print("overwritting args.save_path")
args.save_path = os.path.join(prefix, args.data_path.split('/')[-1], "{}-{}".format(args.training_tasks, args.tasks), args.geo)
if args.geo in ['box']:
tmp_str = "g-{}-mode-{}".format(args.gamma, args.box_mode)
elif args.geo in ['vec']:
tmp_str = "g-{}".format(args.gamma)
elif args.geo == 'beta':
tmp_str = "g-{}-mode-{}".format(args.gamma, args.beta_mode)
elif args.geo == 'rotate':
tmp_str = "g-{}-mode-{}".format(args.gamma, args.rotate_mode)
elif args.geo == 'distmult':
tmp_str = "g-{}-mode-{}".format(args.gamma, args.distmult_mode)
elif args.geo == 'complex':
tmp_str = "g-{}-mode-{}".format(args.gamma, args.complex_mode)
else:
tmp_str = "g-{}-mode-{}".format(args.gamma, args.model_config)
if args.negative_adversarial_sampling:
tmp_str += '-adv-{}'.format(args.adversarial_temperature)
if args.reg_coeff != 0:
tmp_str += '-reg-{}'.format(args.reg_coeff)
tmp_str += '-ngpu-{}'.format(args.gpus)
if args.online_sample:
tmp_str += '-os-{}'.format(args.online_sample_mode)
if eval_tuple(args.online_sample_mode)[3] == 'wstruct':
tmp_str += '-({})'.format(",".join(["%.2f"%i for i in args.normalized_structure_prob]))
tmp_str += '-dataset-{}'.format(args.train_online_mode)
tmp_str += '-opt-{}'.format(args.optim_mode)
if args.share_negative:
tmp_str += '-sharen'
tmp_str += '-%s' % args.sampler_type
tmp_str += '-lr_%s' % args.lr_schedule
if args.checkpoint_path is not None:
args.save_path = args.checkpoint_path
else:
args.save_path = os.path.join(args.save_path, tmp_str, cur_time)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
print ("logging to", args.save_path)
def set_logger(args):
'''
Write logs to console and log file
'''
if args.do_train:
log_file = os.path.join(args.save_path, 'train.log')
else:
log_file = os.path.join(args.save_path, 'test.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='a+'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
if not args.do_train: # if not training, then create tensorboard files in some tmp location
test_name = args.eval_path.split('/')[-1]
writer = SummaryWriter(os.path.join(args.save_path, test_name))
else:
writer = SummaryWriter(args.save_path)
return writer
def get_model(args):
with open('%s/stats.txt'%args.data_path) as f:
entrel = f.readlines()
nentity = int(entrel[0].split(' ')[-1])
nrelation = int(entrel[1].split(' ')[-1])
args.nentity = nentity
args.nrelation = nrelation
model = build_model(args, nentity, nrelation, query_name_dict)
EmbedOpt = get_optim_class(args)
EmbedOpt.prepare_optimizers(args, [x[1] for x in model.named_sparse_embeddings()])
gpus = [int(i) for i in args.gpus.split(".")]
logging.info('-------------------------------'*3)
logging.info('Model Parameter Configuration:')
num_params = 0
for name, param in model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if param.requires_grad:
num_params += np.prod(param.size())
logging.info('Parameter Number: %d' % num_params)
if args.geo == 'box':
logging.info('box mode = %s' % args.box_mode)
elif args.geo == 'beta':
logging.info('beta mode = %s' % args.beta_mode)
return model
def try_load_checkpoint(args, model):
init_step = 0
current_learning_rate = args.learning_rate
warm_up_steps = args.max_steps // 2
optimizer_stats = None
if args.checkpoint_path is not None:
logging.info('Loading checkpoint %s...' % args.checkpoint_path)
checkpoint = torch.load(os.path.join(args.checkpoint_path, 'checkpoint'), map_location='cpu')
init_step = checkpoint['step']
missing, unexpected = model.load_state_dict(checkpoint['model_state_dict'], strict=False)
logging.info("Missing keys: %s" % (str(missing)))
logging.info("Unexpected keys: %s" % (str(unexpected)))
if args.do_train:
current_learning_rate = checkpoint['current_learning_rate']
warm_up_steps = checkpoint['warm_up_steps']
optimizer_stats = checkpoint['optimizer_state_dict']
else:
logging.info('Randomly Initializing %s Model...' % args.geo)
opt_stats = {
'init_step': init_step,
'warm_up_steps': warm_up_steps,
'current_learning_rate': current_learning_rate,
'optimizer_stats': optimizer_stats
}
return opt_stats
def load_1p_eval_data(args, phase):
logging.info("loading %s data for link pred" % phase)
all_data = torch.load(os.path.join(args.eval_path, "%s.pt" % phase))
if 'head_neg' in all_data: # bi-directional
logging.info('evaluating bi-directional 1p')
fwd_data = {'head': all_data['head'],
'relation': all_data['relation'] * 2,
'tail': all_data['tail']}
if 'tail_neg' in all_data:
fwd_data['tail_neg'] = all_data['tail_neg']
backwd_data = {'head': all_data['tail'],
'relation': all_data['relation'] * 2 + 1,
'tail': all_data['head']}
        if 'head_neg' in all_data:
backwd_data['tail_neg'] = all_data['head_neg']
merged_dict = {}
for key in fwd_data:
merged_dict[key] = np.concatenate([fwd_data[key], backwd_data[key]])
else:
logging.info('evaluating uni-directional 1p')
fwd_data = {'head': all_data['head'],
'relation': all_data['relation'],
'tail': all_data['tail']}
if 'tail_neg' in all_data:
fwd_data['tail_neg'] = all_data['tail_neg']
merged_dict = fwd_data
if args.eval_batch_size > 1:
test_dataset = Test1pBatchDataset(merged_dict, args.nentity, args.nrelation)
else:
test_dataset = Test1pDataset(merged_dict, args.nentity, args.nrelation)
logging.info("%s info:" % phase)
logging.info("num queries: %s" % len(test_dataset))
buf = mp.Queue()
writer_buffer = mp.Queue()
return QueryData(test_dataset, buf, writer_buffer)
def load_eval_data(args, phase):
tasks = args.tasks.split('.')
logging.info("loading %s data" % phase)
if args.eval_path is not None:
all_data = pickle.load(open(os.path.join(args.eval_path, "all-%s-data.pkl" % phase), 'rb'))
# remove tasks not in args.tasks
query_structures_to_remove = []
for name in all_tasks:
if not args.filter_test:
continue
if 'u' in name:
name, evaluate_union = name.split('-')
else:
evaluate_union = args.evaluate_union
if name not in tasks or evaluate_union != args.evaluate_union:
query_structure = name_query_dict[name if 'u' not in name else '-'.join([name, evaluate_union])]
query_structures_to_remove.append(query_structure)
if len(query_structures_to_remove) != 0:
all_data = [data for data in all_data if data[0] not in query_structures_to_remove]
else:
print('no %s data found' % phase)
all_data = []
test_dataset = MultihopTestDataset(all_data, args.nentity, args.nrelation)
logging.info("%s info:" % phase)
logging.info("num queries: %s" % len(test_dataset))
buf = mp.Queue()
writer_buffer = mp.Queue()
return QueryData(test_dataset, buf, writer_buffer)
def write_to_writer(eval_dict, writer):
def collect_and_write(writer_buffer, mode):
metrics, average_metrics, step = writer_buffer.get()
if step == -1:
return False
for query_structure in metrics:
for metric in metrics[query_structure]:
qname = query_name_dict[query_structure] if query_structure in query_name_dict else str(query_structure)
writer.add_scalar("_".join([mode, qname, metric]), metrics[query_structure][metric], step)
for metric in average_metrics:
writer.add_scalar("_".join([mode, 'average', metric]), average_metrics[metric], step)
return True
writer_flag = True
while writer_flag:
writer_flag = False
for key in eval_dict:
if collect_and_write(eval_dict[key].writer_buffer, key):
writer_flag = True
def main(parser):
args = parser.parse_args(None)
set_global_seed(args.seed)
gpus = [int(i) for i in args.gpus.split(".")]
assert args.gpus == '.'.join([str(i) for i in range(len(gpus))]), 'only support continuous gpu ids starting from 0, please set CUDA_VISIBLE_DEVICES instead'
maybe_download_dataset(args.data_path)
setup_train_mode(args)
setup_save_path(args)
writer = set_logger(args)
model = get_model(args)
logging.info('-------------------------------'*3)
logging.info('Geo: %s' % args.geo)
logging.info('Data Path: %s' % args.data_path)
logging.info('#entity: %d' % args.nentity)
logging.info('#relation: %d' % args.nrelation)
logging.info('#max steps: %d' % args.max_steps)
logging.info('Evaluate unions using: %s' % args.evaluate_union)
kg_mem = KGMem(dtype=args.kg_dtype)
kg_mem.load(os.path.join(args.data_path, 'train_bidir.bin'))
kg_mem.share_memory()
opt_stats = try_load_checkpoint(args, model)
logging.info('tasks = %s' % args.tasks)
logging.info('init_step = %d' % opt_stats['init_step'])
if args.do_train:
logging.info("Training info:")
logging.info("{}: infinite".format(args.training_tasks))
logging.info('Start Training...')
        logging.info('learning_rate = %g' % opt_stats['current_learning_rate'])
logging.info('batch_size = %d' % args.batch_size)
logging.info('hidden_dim = %d' % args.hidden_dim)
logging.info('gamma = %f' % args.gamma)
eval_dict = {}
aggr_procs = []
args.gpus = gpus
if len(gpus) > 1:
assert not args.cuda
model.share_memory()
for phase in ['valid', 'test']:
if getattr(args, 'do_%s' % phase, False):
if args.eval_link_pred: # load ogb benchmark 1p dataset
d = load_1p_eval_data(args, phase)
else:
d = load_eval_data(args, phase)
            result_aggregator = mp.Process(target=async_aggr, args=(args, d.buffer, d.writer_buffer, phase))
result_aggregator.start()
aggr_procs.append(result_aggregator)
eval_dict[phase] = d
if args.feature_folder is not None:
logging.info('loading static entity+relation features from %s' % args.feature_folder)
ro_feat = {
"entity": torch.tensor(np.load(os.path.join(args.feature_folder, "entity_feat.npy")), dtype=torch.float16),
"relation": torch.tensor(np.load(os.path.join(args.feature_folder, "relation_feat.npy")), dtype=torch.float16)
}
else:
ro_feat = None
procs = []
training_tasks = args.training_tasks.split('.')
for rank, gpu_id in enumerate(gpus):
logging.info("[GPU {}] tasks: {}".format(gpu_id, args.training_tasks))
local_eval_dict = {}
for phase in eval_dict:
q_data = eval_dict[phase]
nq_per_proc = math.ceil(len(q_data.data) / len(gpus))
local_eval_dict[phase] = QueryData(q_data.data.subset(rank * nq_per_proc, nq_per_proc), q_data.buffer, q_data.writer_buffer)
proc = mp.Process(target=train_mp, args=(args, kg_mem, opt_stats, model, local_eval_dict, training_tasks, ro_feat, gpu_id))
procs.append(proc)
proc.start()
write_to_writer(eval_dict, writer)
for proc in procs + aggr_procs:
proc.join()
logging.info("Training finished!!")
if __name__ == '__main__':
torch.multiprocessing.set_start_method('spawn')
main(parse_args())
|
test_secappend.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import os
import traceback
import random
import time
import json
import Queue
from twisted.application import service, internet
from twisted.python.log import ILogObserver
from twisted.internet import reactor, task, defer, threads
from threading import Thread
from kademlia import log
from calvin.runtime.south.storage.twistedimpl.securedht.append_server import AppendServer
# _log = get_logger(__name__)
class KNet(object):
def __init__(self, number, server_type=AppendServer):
self.nodes = []
self.boot_strap = None
if not reactor.running:
print "Starting reactor only once"
            self.reactor_thread = Thread(target=reactor.run, args=(False,))
            self.reactor_thread.start()
for a in xrange(number):
self.nodes.append(ServerApp(server_type))
def start(self):
bootstrap = []
for a in self.nodes:
port, kserver = a.start(0, bootstrap)
if len(bootstrap) < 100:
bootstrap.append(("127.0.0.1", port))
# Wait for them to start
time.sleep(.8)
def stop(self):
for node in self.nodes:
node.stop()
self.nodes = []
time.sleep(1)
def get_rand_node(self):
index = random.randint(0, max(0, len(self.nodes) - 1))
return self.nodes[index]
class ServerApp(object):
def __init__(self, server_type):
self.server_type = server_type
def start(self, port=0, boot_strap=[]):
self.kserver = self.server_type()
self.kserver.bootstrap(boot_strap)
self.port = threads.blockingCallFromThread(reactor, reactor.listenUDP, port, self.kserver.protocol)
print "Starting server:", self.port
time.sleep(.2)
return self.port.getHost().port, self.kserver
def call(self, func, *args, **kwargs):
reactor.callFromThread(func, *args, **kwargs)
def __getattr__(self, name):
class caller:
def __init__(self, f, func):
self.f = f
self.func = func
def __call__(self, *args, **kwargs):
# _log.debug("Calling %s(%s, %s, %s)" %(self.f, self.func, args, kwargs))
return self.func(*args, **kwargs)
if hasattr(self.kserver, name) and callable(getattr(self.kserver, name)):
return caller(self.call, getattr(self.kserver, name))
else:
# Default behaviour
raise AttributeError
def get_port(self):
return self.port
def stop(self):
result = threads.blockingCallFromThread(reactor, self.port.stopListening)
def normal_test(match):
def test(obj):
if obj != match:
print("%s != %s" % (repr(obj), repr(match)))
return obj == match
return test
def json_test(match):
try:
jmatch = json.loads(match)
except:
print("Not JSON in json test!!!")
return False
def test(obj):
try:
jobj = json.loads(obj)
except:
print("Not JSON in json test!!!")
return False
if jobj != jmatch and not isinstance(jobj, list) and not isinstance(jmatch, list):
print("%s != %s" % (repr(jobj), repr(jmatch)))
if isinstance(jobj, list) and isinstance(jmatch, list):
return set(jobj) == set(jmatch)
return jobj == jmatch
return test
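# A note on json_test (sketch): when both sides decode to JSON lists they are
# compared as sets, so element order is ignored, e.g.
#   json_test(json.dumps(["apa", "tiger"]))(json.dumps(["tiger", "apa"]))  # -> True
# Non-list values fall back to a plain equality check.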
def do_sync(func, **kwargs):
test = None
timeout = .2
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
if 'test' in kwargs:
test = kwargs.pop('test')
q = Queue.Queue()
def respond(value):
q.put(value)
d = func(**kwargs)
d.addCallback(respond)
try:
a = q.get(timeout=timeout)
except Queue.Empty:
assert False
if test is not None:
assert test(a)
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
print "hejsan"
@pytest.mark.slow
class TestKAppend(object):
test_nodes = 20
def test_append(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
item = ["apa"]
test_str = json.dumps(item)
# set(["apa"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
match_str = json.dumps(item)
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "tiger"])
test_str3 = json.dumps(["elefant"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str3, test=normal_test(True))
match_str = json.dumps(["apa", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str4 = json.dumps(["lejon"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str4, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
match_str = json.dumps(item)
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=json_test(match_str))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
finally:
import traceback
traceback.print_exc()
a.stop()
def test_set(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(10):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
finally:
a.stop()
def test_delete(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
# Make the nodes know each other
for _ in range(10):
key_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key=key_str, value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key=key_str, test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
time.sleep(.7)
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(3):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value=None, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(None))
finally:
a.stop()
|
k8s_secret_replicator.py
|
#!/usr/bin/env python3
import kubernetes
from kubernetes.client.rest import ApiException
import threading
import logging
import os
FORMAT = "%(levelname)s: %(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def safe_label_get(obj, label_name, default=None):
if obj.metadata.labels is None:
return default
else:
return obj.metadata.labels.get(label_name, default)
class WatchedSecret:
def __init__(self, name, namespace):
self.name = name
self.namespace = namespace
def namespace_valid(self, namespace):
return True
class Replicator:
def __init__(self, namespace):
kubernetes.config.load_incluster_config()
self.lock = threading.Lock()
self.watched_secrets = {}
self.namespace = namespace
self.label_names = os.environ.get('SECRET_REPLICATOR_LABEL_NAMES',
'secret-replicator.daewok/replicate').split(';')
self.managed_label_name = os.environ.get('SECRET_REPLICATOR_MANGED_LABEL_NAME',
'secret-replicator.daewok/managed')
def add_secret_to_namespace(self, s, raw_secret, ns, v1):
try:
existing_secret = v1.read_namespaced_secret(s.name, ns)
except ApiException as e:
if e.status == 404:
existing_secret = None
else:
raise e
if existing_secret is None:
log.info('Creating secret %s/%s', ns, s.name)
raw_secret.metadata.resource_version = None
v1.create_namespaced_secret(ns, raw_secret)
elif safe_label_get(existing_secret, self.managed_label_name) == 'true':
log.info('Replacing secret %s/%s', ns, s.name)
raw_secret.metadata.resource_version = existing_secret.metadata.resource_version
v1.replace_namespaced_secret(s.name, ns, raw_secret)
else:
log.warn('Secret %s/%s already exists and is not managed by secret replicator',
ns, s.name)
def add_secret_to_matching_namespaces(self, s, v1, target_namespaces=None):
raw_secret = v1.read_namespaced_secret(s.name, s.namespace,
export=True,
exact=False)
if raw_secret.metadata.labels is None:
raw_secret.metadata.labels = {}
raw_secret.metadata.labels[self.managed_label_name] = "true"
if target_namespaces is None:
all_ns_objs = v1.list_namespace(watch=False).items
target_namespaces = [x.metadata.name for x in all_ns_objs]
# Don't do anything to our own namespace
if self.namespace in target_namespaces:
target_namespaces.remove(self.namespace)
for ns_name in target_namespaces:
if s.namespace_valid(ns_name):
self.add_secret_to_namespace(s, raw_secret, ns_name, v1)
def watch_for_new_namespaces(self):
v1 = kubernetes.client.CoreV1Api()
w = kubernetes.watch.Watch()
for e in w.stream(v1.list_namespace):
type = e['type']
obj = e['object']
if obj.metadata.name == self.namespace:
# Don't do anything to our own namespace.
continue
log.debug('got %s event', type)
log.debug('got %s object', obj)
if type == 'ADDED':
with self.lock:
log.info('Adding secrets to new namespace: %s', obj.metadata.name)
for s in self.watched_secrets.values():
self.add_secret_to_matching_namespaces(s, v1, target_namespaces=[obj.metadata.name])
def secret_should_be_replicated(self, s):
if safe_label_get(s, self.managed_label_name) is not None:
return False
for label_name in self.label_names:
if safe_label_get(s, label_name) is not None:
return True
return False
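    # Example (sketch): with the default SECRET_REPLICATOR_LABEL_NAMES, a secret
    # is picked up for replication when it carries the replicate label and is
    # not itself marked as managed, e.g. (hypothetical manifest):
    #   metadata:
    #     labels:
    #       secret-replicator.daewok/replicate: "true"
    # Secrets created by this controller are skipped because they carry the
    # managed label instead.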
def watch_for_new_secrets(self):
v1 = kubernetes.client.CoreV1Api()
w = kubernetes.watch.Watch()
ns = self.namespace
for e in w.stream(v1.list_namespaced_secret, namespace=ns):
with self.lock:
type = e['type']
obj = e['object']
log.debug('got %s event', type)
log.debug('got %s object', obj)
has_label = self.secret_should_be_replicated(obj)
secret_name = obj.metadata.name
secret_ns = obj.metadata.namespace
if type == 'ADDED':
if not has_label:
continue
log.info('watching new secret %s/%s',
secret_ns, secret_name)
new_secret = WatchedSecret(secret_name, secret_ns)
self.watched_secrets[secret_name] = new_secret
self.add_secret_to_matching_namespaces(new_secret, v1)
elif type == 'DELETED':
if not has_label:
continue
log.info('stop watching secret %s/%s',
secret_ns, secret_name)
del self.watched_secrets[secret_name]
elif type == 'MODIFIED':
log.info('modified: %s/%s', secret_ns, secret_name)
if has_label:
new_secret = WatchedSecret(secret_name, secret_ns)
self.watched_secrets[secret_name] = new_secret
self.add_secret_to_matching_namespaces(new_secret, v1)
elif secret_name in self.watched_secrets:
del self.watched_secrets[secret_name]
else:
log.warn('Unknown modification type: %s', type)
log.warn('got %s object', obj)
def start(self):
self.ns_thread = threading.Thread(target=self.watch_for_new_namespaces)
self.sec_thread = threading.Thread(target=self.watch_for_new_secrets)
self.ns_thread.start()
self.sec_thread.start()
self.ns_thread.join()
self.sec_thread.join()
if __name__ == '__main__':
namespace_to_watch = os.environ.get('SECRET_REPLICATOR_NAMESPACE_TO_WATCH',
None)
if namespace_to_watch is None:
raise ValueError('SECRET_REPLICATOR_NAMESPACE_TO_WATCH must be set')
replicator = Replicator(namespace=namespace_to_watch)
replicator.start()
|
computer_vision.py
|
# ############################## Start the webserver, the opencv color grabber and the GUI #############################
import Twophase.start_server
from threading import Thread
from Twophase.vision2 import grab_colors
background_thread = Thread(target=Twophase.start_server.start, args=(8080, 20, 2))
background_thread.start()
# Server listens now on port 8080, maxlength 20 moves, timeout 2 seconds
thr = Thread(target=grab_colors, args=())
thr.start()
# Run the opencv code and detect facelet colors
import Twophase.client_gui2
# Start the GUI with several sliders to configure some opencv parameters
|
dataset.py
|
"""Data fetching with pandas
"""
# MIT License
#
# Copyright (c) 2018 Yichun Shi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import math
import random
import shutil
from multiprocessing import Process, Queue
import numpy as np
import pandas as pd
queue_timeout = 600
class Dataset(object):
def __init__(self, path=None, prefix=None):
if path is not None:
self.init_from_path(path)
else:
self.data = pd.DataFrame([], columns=['path', 'abspath', 'label', 'name'])
self.prefix = prefix
self.base_seed = 0
self.batch_queue = None
self.batch_workers = None
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
return self.data[key]
def _delitem(self, key):
self.data.__delitem__(key)
@property
def num_classes(self):
return len(self.data['label'].unique())
@property
def classes(self):
return self.data['label'].unique()
@property
def size(self):
return self.data.shape[0]
@property
def loc(self):
return self.data.loc
@property
def iloc(self):
return self.data.iloc
def init_from_path(self, path):
path = os.path.expanduser(path)
_, ext = os.path.splitext(path)
if os.path.isdir(path):
self.init_from_folder(path)
elif ext == '.txt':
self.init_from_list(path)
else:
raise ValueError('Cannot initialize dataset from path: %s\n\
It should be either a folder, .txt or .hdf5 file' % path)
# print('%d images of %d classes loaded' % (len(self.images), self.num_classes))
def init_from_folder(self, folder):
folder = os.path.abspath(os.path.expanduser(folder))
class_names = os.listdir(folder)
class_names.sort()
paths = []
labels = []
names = []
for label, class_name in enumerate(class_names):
classdir = os.path.join(folder, class_name)
if os.path.isdir(classdir):
images_class = os.listdir(classdir)
images_class.sort()
images_class = [os.path.join(class_name,img) for img in images_class]
paths.extend(images_class)
labels.extend(len(images_class) * [label])
names.extend(len(images_class) * [class_name])
abspaths = [os.path.join(folder,p) for p in paths]
self.data = pd.DataFrame({'path': paths, 'abspath': abspaths,
'label': labels, 'name': names})
self.prefix = folder
def init_from_list(self, filename, folder_depth=2):
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip().split(' ') for line in lines]
abspaths = [os.path.abspath(line[0]) for line in lines]
paths = ['/'.join(p.split('/')[-folder_depth:]) for p in abspaths]
if len(lines[0]) == 2:
labels = [int(line[1]) for line in lines]
names = [str(lb) for lb in labels]
elif len(lines[0]) == 1:
names = [p.split('/')[-folder_depth] for p in abspaths]
_, labels = np.unique(names, return_inverse=True)
else:
raise ValueError('List file must be in format: "fullpath(str) \
label(int)" or just "fullpath(str)"')
self.data = pd.DataFrame({'path': paths, 'abspath': abspaths,
'label': labels, 'name': names})
        self.prefix = '/'.join(abspaths[0].split('/')[:-folder_depth])
#
# Data Loading
#
def set_base_seed(self, base_seed=0):
self.base_seed = base_seed
def random_samples_from_class(self, label, num_samples, exception=None):
# indices_temp = self.class_indices[label]
indices_temp = list(np.where(self.data['label'].values == label)[0])
if exception is not None:
indices_temp.remove(exception)
assert len(indices_temp) > 0
# Sample indices multiple times when more samples are required than present.
indices = []
iterations = int(np.ceil(1.0*num_samples / len(indices_temp)))
for i in range(iterations):
sample_indices = np.random.permutation(indices_temp)
indices.append(sample_indices)
indices = list(np.concatenate(indices, axis=0)[:num_samples])
return indices
def get_batch_indices(self, batch_format):
        ''' Randomly sample a batch of indices according to batch_format (per-class sampling).'''
indices_batch = []
batch_size = batch_format['size']
num_classes = batch_format['num_classes']
assert batch_size % num_classes == 0
num_samples_per_class = batch_size // num_classes
idx_classes = np.random.permutation(self.classes)[:num_classes]
indices_batch = []
for c in idx_classes:
indices_batch.extend(self.random_samples_from_class(c, num_samples_per_class))
return indices_batch
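    # Example (sketch): batch_format is a plain dict; with
    #   {'size': 64, 'num_classes': 16}
    # each batch holds 16 randomly chosen classes with 64 // 16 = 4 samples per
    # class ('size' must therefore be divisible by 'num_classes').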
def get_batch(self, batch_format):
indices = self.get_batch_indices(batch_format)
batch = {}
for column in self.data.columns:
batch[column] = self.data[column].values[indices]
return batch
# Multithreading preprocessing images
def start_batch_queue(self, batch_format, proc_func=None, maxsize=1, num_threads=3):
self.batch_queue = Queue(maxsize=maxsize)
def batch_queue_worker(seed):
np.random.seed(seed+self.base_seed)
while True:
batch = self.get_batch(batch_format)
if proc_func is not None:
batch['image'] = proc_func(batch['abspath'])
self.batch_queue.put(batch)
self.batch_workers = []
for i in range(num_threads):
worker = Process(target=batch_queue_worker, args=(i,))
worker.daemon = True
worker.start()
self.batch_workers.append(worker)
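    # Example usage (sketch, hypothetical paths and preprocessing function):
    #   dataset = Dataset('/path/to/image_folders')
    #   dataset.start_batch_queue({'size': 64, 'num_classes': 16},
    #                             proc_func=load_and_preprocess_images)
    #   batch = dataset.pop_batch_queue()  # dict with 'abspath', 'label', 'image', ...
    # Note: despite the "Multithreading" comment above, the workers are
    # multiprocessing.Process objects, so each batch is assembled in a
    # separate process.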
def pop_batch_queue(self, timeout=queue_timeout):
return self.batch_queue.get(block=True, timeout=timeout)
def release_queue(self):
        # index_queue/index_worker are not created by this class's __init__,
        # so guard the lookups to avoid an AttributeError here:
        if getattr(self, 'index_queue', None) is not None:
            self.index_queue.close()
        if self.batch_queue is not None:
            self.batch_queue.close()
        if getattr(self, 'index_worker', None) is not None:
            self.index_worker.terminate()
            self.index_worker = None
if self.batch_workers is not None:
for w in self.batch_workers:
w.terminate()
del w
self.batch_workers = None
|
fsapi_core.py
|
import inspect
import sys
import threading
import requests
import re
import time
from requests.exceptions import ReadTimeout, ConnectTimeout
from xml.dom import minidom
from .fsapi_exception import *
class FSAPI_Node_Blocked_Exception(Exception):
def __init__(self, message="Device is controlled from a different controller"):
self.message = message
super().__init__(self.message)
class FSAPI_Device_Offline_Exception(Exception):
def __init__(self, message="Device is offline"):
self.message = message
super().__init__(self.message)
class FSAPI_Session_Invalid_Exception(Exception):
def __init__(self, message="Session is invalid"):
self.message = message
super().__init__(self.message)
class FSAPI_Context_Manager:
def __init__(self, fsapi_reference, pin = "1234"):
self._access_locker = threading.Lock()
self._req_session = requests.Session()
self._fsapi = fsapi_reference
self._req_session.headers.update({"pin": pin})
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# time.sleep(0.01) # to throttle requests
pass
def get_new_session(self):
pass
def invalidate_session(self):
pass
def get_request(self, url, params):
with self._access_locker:
self._fsapi.write_log(f"get_request: {url} with {params}")
try:
result = self._req_session.get(url, params=params, timeout=2)
if result.status_code == 200:
self._fsapi.write_log(f"get_request DONE OK: {url} with {params}")
return result.content
else:
self._fsapi.write_log(f"get_request DONE: {url} with {params}, resulted in {result.status_code}")
return result.status_code
except ReadTimeout:
self._fsapi.write_log(f"get_request ReadTimeout")
return 408
except ConnectTimeout:
self._fsapi.write_log(f"get_request ConnectTimeout")
return 408
class FSAPI(object):
_low_level_property_repository_factory = []
_callback_on_update = None
_callback_method_on_update = None
_callback_method_on_update_context = None
"""
Retry logic:
In getter und setter un deleter einbauen:
Wenn 408, dann retry in 2sec, dreimal, danach permanent failed
Wenn 404, dann session_id erneuern, nochmal probieren, dann permanent failed
"""
@staticmethod
def register_class(clss, fsapi_property_alias, fsapi_set_method_alias: str, fsapi_type: str):
typ_key = clss.key.lower()
if fsapi_type == 'void1':
def executor(s): return s._repository[typ_key].set(1)
setattr(FSAPI, fsapi_set_method_alias, executor)
else:
def getter(s): return s._repository[typ_key].get()
def setter(s, v): return s._repository[typ_key].set(v)
def deleter(s): return s._repository[typ_key].dele()
setattr(FSAPI, fsapi_property_alias, property(getter, setter, deleter))
if fsapi_set_method_alias is not None:
setattr(FSAPI, fsapi_set_method_alias, setter)
FSAPI._low_level_property_repository_factory.append(lambda FSAPI_inst: FSAPI_inst._register(clss.__name__))
@staticmethod
def register_high_level_property(property_name, getter, setter, dependsOn: [type] = []):
setattr(FSAPI, property_name, property(getter, setter))
@staticmethod
def register_proxy_method(method_name, method_implementation, dependsOn: [type] = []):
setattr(FSAPI, method_name, method_implementation)
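    # Example (sketch, hypothetical node class): a node module elsewhere in the
    # package would typically register itself along the lines of
    #   FSAPI.register_class(FSAPI_Node_Volume, 'volume', 'set_volume', 'int')
    # which exposes FSAPI.volume as a property backed by the node's
    # get/set/dele methods, plus a set_volume() convenience method.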
    def __enter__(self):
        return self
def __exit__(self, type, value, tb):
self.stop()
def __init__(self, fsapi_device_url, pin, notification_thread_name=None):
self._pin = pin
self._repository = {}
self.settables = []
self.gettables = []
self._access_locker = FSAPI_Context_Manager(self, pin=pin)
self._notification_thread_name = notification_thread_name
self._cached_webfsapi = None
self._fsapi_device_url = fsapi_device_url
        self._callback_method_log = None
        self._callback_method_on_update = None
        self._callback_method_on_error = None
for registrator in FSAPI._low_level_property_repository_factory:
registrator(self)
def _register(self, typ_name):
typ = globals()[typ_name]
node_instance = typ()
node_instance._inject_fsapi(self)
if node_instance.can_get:
self.gettables.append(node_instance.fsapi_property_alias)
if node_instance.can_set:
self.settables.append(node_instance.fsapi_property_alias)
typ_key = typ.key.lower()
self._repository[typ_key] = node_instance
def _get_webfsapi(self):
if self._cached_webfsapi is None:
r = requests.get(self._fsapi_device_url)
xml = minidom.parseString(r.content).firstChild
webfsapi = next(iter(xml.getElementsByTagName('webfsapi')))
self._cached_webfsapi = webfsapi.firstChild.data
return self._cached_webfsapi
def report(self, prefix="", key_search=".*"):
for item_key in self._repository:
if item_key == FSAPI_Node_Notifications.key:
continue
if not re.search(key_search, item_key):
continue
if self._repository[item_key].can_get:
yield f"Retrieving: {prefix} {item_key}"
value = self._repository[item_key].get()
yield f" Result: {value}"
def register_callback_function(self, func):
self._callback_on_update = func
def register_callback_method(self, context, func):
self._callback_method_on_update_context = context
self._callback_method_on_update = func
def register_exception_callback_method(self, context, func):
self._callback_method_on_error_context = context
self._callback_method_on_error = func
def register_logging_method(self, context, func):
self._callback_method_log_context = context
self._callback_method_log = func
def stop_listening_to_notifications(self):
self.write_log("Stopping myself now")
self._listen_active = False
try:
del self.session_id
except:
pass
try:
self._listen_thread.join(1)
except:
pass
def start_listening_to_notifications(self):
self.write_log("Start Listening to Notifications now")
if self._notification_thread_name is not None:
self._listen_thread = threading.Thread(
target=self._listen, name=self._notification_thread_name + '_FSAPI_notify')
self._listen_thread.start()
def __del__(self):
self.stop_listening_to_notifications()
def write_log(self, log_txt):
if self._callback_method_log is not None:
self._callback_method_log(self._callback_method_log_context, log_txt)
else:
print(log_txt)
def _listen(self):
# __ = list(self.report())
time.sleep(5)
self._listen_active = True
self.write_log("Listening to changes is enabled now")
while self._listen_active:
try:
self.utilize_notifications()
except ConnectionError:
self.write_log("Connection failed, sleeping 10s before trying to get next set of notifications")
time.sleep(10)
except (RuntimeError, TypeError, NameError, ValueError, Exception) as e:
if self._callback_method_on_error is not None:
self.write_log("Calling callback on error")
try:
self._callback_method_on_error(self._callback_method_on_error_context, e)
except (RuntimeError, TypeError, NameError, ValueError, Exception) as x:
self.write_log("Error-callback failed" + repr(x))
else:
self.write_log("Exception occured in listening to Notifications: " + e)
# time.sleep(2)
# raise
except:
self.write_log("Something really bad happened")
self.write_log("Listening to changes is disabled now!")
# Read-only ###################################################################################
def utilize_notifications(self):
self.write_log("Utilizing notifications:")
res = self._notifies
if isinstance(res, int):
if res == 404:
del self.session_id
return
else:
self.write_log(f"Notification with {res}")
return
if res is None:
self.write_log("Notifications with nothing")
return
for line in res:
node = line['node']
value = line['value']
if not node in self._repository.keys():
continue
self.write_log(f"Updating notified {self._notification_thread_name}: {node} => {self._repository[node].fsapi_property_alias} with {value}")
self._repository[node]._update_cache(value)
if self._callback_on_update is not None:
self._callback_on_update(self, self._repository[node].fsapi_property_alias, value)
if self._callback_method_on_update is not None:
self._callback_method_on_update(self._callback_method_on_update_context, self, self._repository[node].fsapi_property_alias, value)
# Order matters: FSAPI needs to be declared before dependent objects can be loaded and registered,
# therefore the noqa was added
from .netRemote_session import * # noqa
from .netRemote_sys import * # noqa
from .netRemote_sys_caps import * # noqa
from .netRemote_sys_audio import * # noqa
from .netRemote_play import * # noqa
from .netRemote_nav import * # noqa
from .netRemote_multiroom_caps import * # noqa
from .netRemote_multiroom_client import * # noqa
from .netRemote_multiroom_device import * # noqa
from .netRemote_multiroom_group import * # noqa
from .fsapi_extensions import * # noqa
|
rrino.py
|
import os
import json
import urllib2
import datetime
import threading
from httplib import BadStatusLine
import weechat
SCRIPT_NAME = 'rrino'
SCRIPT_AUTHOR = 'Matteo Bigoi <bigo@crisidev.org>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'MIT'
SCRIPT_DESC = 'Pass highlights and private messages to the OS X 10.8+ Notification Center'
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', '')
DEFAULT_OPTIONS = {
'show_highlights': 'on',
'show_private_message': 'on',
'show_message_text': 'on',
'ignore_old_messages': 'off',
'server_addr': '127.0.0.1',
}
for key, val in DEFAULT_OPTIONS.items():
if not weechat.config_is_set_plugin(key):
weechat.config_set_plugin(key, val)
weechat.hook_print('', 'irc_privmsg', '', 1, 'notify', '')
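# hook_print invokes notify() below with (data, buffer, date, tags, displayed,
# highlight, prefix, message); the `user` parameter receives the prefix (nick)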
def push(data, tag, port):
try:
req = urllib2.Request('http://{}:{}/notify'.format(weechat.config_get_plugin('server_addr'), port))
req.add_header('Content-Type', 'application/json')
resp = urllib2.urlopen(req, json.dumps(data), timeout=0.5)
if resp.getcode() != 200:
weechat.prnt(
"",
"%srrino http server %s:%s error, status code %s" % (weechat.prefix("error"), tag, port, resp.getcode())
)
except Exception:
pass
def push_notification(user, message):
rrino_dir = os.path.join(weechat.info_get('weechat_dir', ''), 'rrino')
for client in os.listdir(rrino_dir):
client_split = client.split(":")
if len(client_split) == 2:
tag, port = client_split
data = {'from': '{}: {}'.format(tag, user), 'message': message}
t = threading.Thread(target=push, args=(data, tag, port))
t.daemon = True
t.start()
def notify(data, buffer, date, tags, displayed, highlight, user, message):
# ignore if it's yourself
own_nick = weechat.buffer_get_string(buffer, 'localvar_nick')
if user == own_nick or user == ('@%s' % own_nick):
return weechat.WEECHAT_RC_OK
if weechat.config_get_plugin('ignore_old_messages') == 'on':
message_time = datetime.datetime.utcfromtimestamp(int(date))
now_time = datetime.datetime.utcnow()
# ignore if the message is greater than 5 seconds old
if (now_time - message_time).seconds > 5:
return weechat.WEECHAT_RC_OK
if weechat.config_get_plugin('show_message_text') == 'off':
message = 'Private message'
if weechat.config_get_plugin('show_highlights') == 'on' and int(highlight):
channel = weechat.buffer_get_string(buffer, 'localvar_channel')
user = '{}@{}'.format(user, channel)
push_notification(user, message)
elif weechat.config_get_plugin('show_private_message') == 'on' and 'notify_private' in tags:
push_notification(user, message)
return weechat.WEECHAT_RC_OK
|
ffmpeg.py
|
import subprocess
from threading import Thread
from queue import Queue
queue_720 = Queue()
queue_480 = Queue()
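# each queue feeds one worker thread: queue_720 -> ffmpeg_720(), queue_480 -> ffmpeg_480()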
def ffmpeg_720():
try:
file = queue_720.get()
filename = file.split('.')
output_name = filename[0] + '_720p.mp4'
cmd = 'ffmpeg -i {input} -b:v {bit_rate}M -r {fps} -s hd{res} {output}'
cmd = cmd.format(input=file, bit_rate=2, fps=30, res=720, output=output_name)
        # cmd is a single string, so execute it through the shell
        subprocess.run(cmd, shell=True)
        print('Converted ' + file + ' to 720p successfully.')
    except Exception as e:
        print(e)
def ffmpeg_480():
try:
file = queue_480.get()
filename = file.split('.')
output_name = filename[0] + '_480p.mp4'
cmd = 'ffmpeg -i {input} -b:v {bit_rate}M -r {fps} -s hd{res} {output}'
cmd = cmd.format(input=file, bit_rate=2, fps=30, res=480, output=output_name)
        # cmd is a single string, so execute it through the shell
        subprocess.run(cmd, shell=True)
        print('Converted ' + file + ' to 480p successfully.')
    except Exception as e:
        print(e)
def main():
thread1 = Thread(target=ffmpeg_720)
thread2 = Thread(target=ffmpeg_480)
try:
queue_480.put("video.mp4")
queue_720.put("video.mp4")
    except Exception as e:
        print(e)
    thread1.start()
    thread2.start()
    # wait for both conversions to finish before reporting
    thread1.join()
    thread2.join()
    print("finished.")
if __name__ == '__main__':
main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = QMessageBox.question(self,
"Electrum - " + _("Enable update check"),
_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"),
QMessageBox.Yes,
QMessageBox.No)
config.set_key('check_updates', choice == QMessageBox.Yes, save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
self.history_model.on_fee_histogram()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce something,
        # since the callback may have been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Trezarcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Trezarcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings" / "Preferences" are reserved keywords on macOS, so use this label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('trezarcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Trezarcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Trezarcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
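        # group bursts: bail out if we already notified within the last `rate_limit`
        # seconds; queued transactions are picked up again on a later timer tick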
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Trezarcoin address where the payment should be received. Note that each payment request uses a different Trezarcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Trezarcoin addresses.'),
_('The trezarcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Trezarcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Trezarcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Trezarcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
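# "Max" button handler: mark the button checked and recompute the fee so
# the amount field is filled with the maximum spendable value.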
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
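# Coin-control helpers for the optional "Pay from" list of inputs.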
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
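# A fee or feerate is considered "frozen" when the user edited the visible
# field; frozen values are kept instead of being recomputed.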
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
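# Validate the Send tab and return (outputs, fee_estimator, label, coins),
# or None if validation fails.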
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Trezarcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Trezarcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
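# Build the unsigned transaction from the Send tab; do_preview shows the
# transaction dialog, while do_send asks for confirmation (and the wallet
# password if needed) before signing and broadcasting.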
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
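# Broadcasting runs in a non-GUI thread; on success any BIP70 payment
# request is marked as paid and a payment ACK is sent back.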
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
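# Parse a trezarcoin: URI and fill in the Send tab; URIs carrying a payment
# request ('r', or a signed request) are handled via the on_pr callback.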
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid trezarcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
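# Reset the Send tab to its default empty state.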
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
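# Freezing addresses or coins excludes them from spending; refresh the
# affected views and the fee afterwards.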
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
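# Console tab: exposes wallet, network, config and the Commands object in
# an interactive Python namespace.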
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
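# Status bar: balance label, search box, update-check button and the
# password / preferences / seed / network buttons.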
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
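# Message signing/verification; signing is queued on the wallet thread so
# the GUI stays responsive.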
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Trezarcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Trezarcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
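# Helpers for loading a transaction from raw text, a QR code, a file, or a
# txid fetched from the network.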
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("trezarcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
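# Private-key export: keys are derived in a background thread and progress
# is reported through Qt signals; closing the dialog cancels the export.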
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
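# Import/export of wallet labels via the shared metadata helpers.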
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
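# Sweep dialog: collect WIF private keys and a destination address, prepare
# the coins with sweep_preparations, then reuse the Send tab with the
# external keypairs.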
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
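# Preferences dialog; widgets are grouped into Fees, Transactions, General,
# Fiat and Identity tabs. Some settings only take effect after a restart.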
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
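# Worked example with illustrative numbers (not taken from the codebase): for a
# combined size of 400 bytes, a parent fee of 1000 sat and a target rate of
# 10000 sat/kB, the child fee is 10000 * 400 / 1000 - 1000 = 3000 sat, capped at
# max_fee and floored at total_size (i.e. at least 1 sat/byte over both transactions).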
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
GUI_multiple_threads_sleep_freeze.py
|
'''
Created on May 28, 2019
Ch06
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import Spinbox
from time import sleep
from Ch06_Code.ToolTip import ToolTip
from threading import Thread
GLOBAL_CONST = 42
#=====================================================
class OOP():
def __init__(self): # Initializer method
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
self.create_widgets()
# Button callback
def click_me(self):
self.action.configure(text='Hello ' + self.name.get() +
' ' + self.number_chosen.get())
# Non-threaded code with sleep freezes the GUI
for idx in range(10):
sleep(5)
self.scrol.insert(tk.INSERT, str(idx) + '\n')
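# A minimal sketch of a non-freezing variant (an assumption, not part of the book's
# original listing): running the loop on a worker thread keeps mainloop() responsive.
# Note that later examples in the book also write to the widget from a worker thread,
# although strictly speaking Tkinter widgets prefer main-thread access.
#
#     def click_me_threaded(self):
#         def worker():
#             for idx in range(10):
#                 sleep(5)
#                 self.scrol.insert(tk.INSERT, str(idx) + '\n')
#         Thread(target=worker, daemon=True).start()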
# Spinbox callback
def _spin(self):
value = self.spin.get()
print(value)
self.scrol.insert(tk.INSERT, value + '\n')
# GUI Callback
def checkCallback(self, *ignored_args):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton Callback
def radCall(self):
radSel = self.radVar.get()
if radSel == 0: self.mighty2.configure(text='Blue')
elif radSel == 1: self.mighty2.configure(text='Gold')
elif radSel == 2: self.mighty2.configure(text='Red')
# update progressbar in callback loop
def run_progressbar(self):
self.progress_bar["maximum"] = 100
for i in range(101):
sleep(0.05)
self.progress_bar["value"] = i # increment progressbar
self.progress_bar.update() # have to call update() in loop
self.progress_bar["value"] = 0 # reset/clear progressbar
def start_progressbar(self):
self.progress_bar.start()
def stop_progressbar(self):
self.progress_bar.stop()
def progressbar_stop_after(self, wait_ms=1000):
self.win.after(wait_ms, self.progress_bar.stop)
def usingGlobal(self):
global GLOBAL_CONST
print(GLOBAL_CONST)
GLOBAL_CONST = 777
print(GLOBAL_CONST)
def method_in_a_thread(self):
print('Hi, how are you?')
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
#####################################################################################
def create_widgets(self):
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='Tab 1') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Tab 2') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# LabelFrame using tab1 as the parent
mighty = ttk.LabelFrame(tab1, text=' Mighty Python ')
mighty.grid(column=0, row=0, padx=8, pady=4)
# Modify adding a Label using mighty as the parent instead of win
a_label = ttk.Label(mighty, text="Enter a name:")
a_label.grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
self.name = tk.StringVar()
self.name_entered = ttk.Entry(mighty, width=24, textvariable=self.name)
self.name_entered.grid(column=0, row=1, sticky='W')
# Adding a Button
self.action = ttk.Button(mighty, text="Click Me!", command=self.click_me)
self.action.grid(column=2, row=1)
ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
self.number_chosen = ttk.Combobox(mighty, width=14, textvariable=number, state='readonly')
self.number_chosen['values'] = (1, 2, 4, 42, 100)
self.number_chosen.grid(column=1, row=1)
self.number_chosen.current(0)
# Adding a Spinbox widget
self.spin = Spinbox(mighty, values=(1, 2, 4, 42, 100), width=5, bd=9, command=self._spin) # using range
self.spin.grid(column=0, row=2, sticky='W') # align left
# Using a scrolled Text control
scrol_w = 40; scrol_h = 10 # increase sizes
self.scrol = scrolledtext.ScrolledText(mighty, width=scrol_w, height=scrol_h, wrap=tk.WORD)
self.scrol.grid(column=0, row=3, sticky='WE', columnspan=3)
for child in mighty.winfo_children(): # add spacing to align widgets within tabs
child.grid_configure(padx=4, pady=2)
#=====================================================================================
# Tab Control 2 ----------------------------------------------------------------------
self.mighty2 = ttk.LabelFrame(tab2, text=' The Snake ')
self.mighty2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(self.mighty2, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=0, sticky=tk.W)
self.chVarUn = tk.IntVar()
self.check2 = tk.Checkbutton(self.mighty2, text="UnChecked", variable=self.chVarUn)
self.check2.deselect()
self.check2.grid(column=1, row=0, sticky=tk.W)
self.chVarEn = tk.IntVar()
self.check3 = tk.Checkbutton(self.mighty2, text="Enabled", variable=self.chVarEn)
self.check3.deselect()
self.check3.grid(column=2, row=0, sticky=tk.W)
# trace the state of the two checkbuttons; stored on self so checkCallback() can access them
self.chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
self.chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
# First, we change our Radiobutton global variables into a list
colors = ["Blue", "Gold", "Red"]
# create three Radiobuttons using one variable
self.radVar = tk.IntVar()
# Next we are selecting a non-existing index value for radVar
self.radVar.set(99)
# Now we are creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = tk.Radiobutton(self.mighty2, text=colors[col], variable=self.radVar,
value=col, command=self.radCall)
curRad.grid(column=col, row=1, sticky=tk.W) # row=6
# And now adding tooltips
ToolTip(curRad, 'This is a Radiobutton control')
# Add a Progressbar to Tab 2
self.progress_bar = ttk.Progressbar(tab2, orient='horizontal', length=286, mode='determinate')
self.progress_bar.grid(column=0, row=3, pady=2)
# Create a container to hold buttons
buttons_frame = ttk.LabelFrame(self.mighty2, text=' ProgressBar ')
buttons_frame.grid(column=0, row=2, sticky='W', columnspan=2)
# Add Buttons for Progressbar commands
ttk.Button(buttons_frame, text=" Run Progressbar ", command=self.run_progressbar).grid(column=0, row=0, sticky='W')
ttk.Button(buttons_frame, text=" Start Progressbar ", command=self.start_progressbar).grid(column=0, row=1, sticky='W')
ttk.Button(buttons_frame, text=" Stop immediately ", command=self.stop_progressbar).grid(column=0, row=2, sticky='W')
ttk.Button(buttons_frame, text=" Stop after second ", command=self.progressbar_stop_after).grid(column=0, row=3, sticky='W')
for child in buttons_frame.winfo_children():
child.grid_configure(padx=2, pady=2)
for child in self.mighty2.winfo_children():
child.grid_configure(padx=8, pady=2)
# Creating a Menu Bar
menu_bar = Menu(self.win)
self.win.config(menu=menu_bar)
# Add menu items
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="New")
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self._quit)
menu_bar.add_cascade(label="File", menu=file_menu)
# Display a Message Box
def _msgBox():
msg.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2019.')
# Add another Menu to the Menu Bar and an item
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="About", command=_msgBox) # display messagebox when clicked
menu_bar.add_cascade(label="Help", menu=help_menu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# It is not necessary to create a tk.StringVar()
# strData = tk.StringVar()
strData = self.spin.get()
print("Spinbox value: " + strData)
# call function
self.usingGlobal()
self.name_entered.focus()
# Add Tooltips -----------------------------------------------------
# Add a Tooltip to the Spinbox
ToolTip(self.spin, 'This is a Spinbox control')
# Add Tooltips to more widgets
ToolTip(self.name_entered, 'This is an Entry control')
ToolTip(self.action, 'This is a Button control')
ToolTip(self.scrol, 'This is a ScrolledText control')
#======================
# Start GUI
#======================
oop = OOP()
# Running methods in Threads
run_thread = Thread(target=oop.method_in_a_thread)
oop.win.mainloop()
|
covid_certificate.py
|
""" Generate certificate required to leave home during Covid confinement in France
As part of the management of the Covid-19 pandemic in France, the population
may be confined at home for some weeks when the situation becomes critical.
During these periods, residents can leave their place of confinement only in
a (very) limited number of cases and are then required to bear a certificate
indicating their place of residence, the reason why they are leaving it, and
the time they are leaving it. This certificate may be hand-written, printed on
paper, or take the form of a PDF file and QR code generated by a web service
run by the authorities.
The web service requires the user to key in personal information (first name,
last name, birth date, birth place, address of residence) every time a
certificate is to be generated.
This applet (which runs under Pythonista, on iOS) provides a convenient
alternative to the official service:
- Personal details for yourself and all the persons you are confined with need
only be keyed in once, in file `persons.json`, in the same directory as
the applet.
- When run, the applet displays a list of persons for whom a certificate can be
generated.
- When a person is selected, an email is prepared, with the PDF file for the
certificate attached, and you only have to press the send button.
**Disclaimers**
In all cases tested, this applet generates the same output (PDF file and
QR code) as the official site. However, no guarantee is provided that there
are no cases where the two differ. Furthermore, the output generated by the
official site might change without warning in the future, in which case
the applet would no longer generate the same output, until it is updated -
Use at your own risk.
The objective of this applet is to facilitate the generation of certificates,
not to work around the rules of confinement - Use in a responsible way.
**Dependencies**
- busy_view
- mail_compose
- pdfrw
**Revision history**
- 5-Avr-2020 - Created this module.
- 6-Avr-2020 - Initial release, with only QR code generation.
- 21-Avr-2020 - Second release, with PDF file generation and full UI.
- 1-Nov-2020 - Third release, with PDF file updated to the new official format.
- 28-Nov-2020 - Fourth release, with PDF file updated to the new official format. """
from dataclasses import dataclass, field as dataclass_field
from datetime import datetime, timedelta
import json
from pathlib import Path
from time import sleep
from threading import Thread
from typing import ClassVar, List
import sys
from PIL import Image, ImageDraw, ImageFont
import qrcode
from pdfrw import PageMerge, PdfReader, PdfWriter
from pdfrw.pagemerge import RectXObj
PERSONS_FILE = "persons.json"
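# Assumed layout of persons.json (illustrative; inferred from the Person dataclass and
# the Person(*person_entry) loader at the bottom of this file): a JSON list of entries
# whose values follow the field order first_name, last_name, birthdate, birthplace,
# address, postal_code, city, email. The names below are made up.
# [
#   ["Jean", "Dupont", "01/01/1980", "Paris",
#    "1 rue de l'Exemple", "75001", "Paris", "jean.dupont@example.com"]
# ]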
TEMPLATE_PDF = 'attestation-deplacement-fr 2020 11 28.pdf'
CERTIFICATE_PDF = 'Attestation.pdf'
REASONS = ["travail", "achats_culturel_cultuel", "sante", "famille", "sport_animaux"]
REASONS_TITLES = ["travail", "achats", "sante", "famille", "sport"]
if sys.platform == 'ios':
import ui
from busy_view import BusyView
from mail_compose import mail_compose
CONTROLS_HEIGHT = 30
CONTROLS_HORIZONTAL_MARGIN = 16
CONTROLS_VERTICAL_MARGIN = 4
SEPARATOR_COLOR = '#cccccc' # iOS light grey
class PersonsView(ui.View):
""" UI for presenting a list of persons and generating certificates.
Arguments
---------
persons: `List[` :class:`Person` `]`
List of persons to be displayed. """
def __init__(self, persons: List["Person"]):
self.persons = persons
self.now = datetime.now()
self.name = "Attestion de déplacement"
self.flex = "WH"
self.background_color = 'white'
y = CONTROLS_VERTICAL_MARGIN
self.reason_selector = ui.SegmentedControl(
y=y,
width=300,
segments=REASONS_TITLES,
selected_index=4)
y += CONTROLS_HEIGHT + CONTROLS_VERTICAL_MARGIN
self.generated_label1 = ui.Label(
text=f"Créé à : {self.now.strftime('%Hh%M')} - ")
self.generated_label1.size_to_fit()
self.generated_label1.y = (
y + (CONTROLS_HEIGHT - self.generated_label1.height) / 2)
self.generated_textfield = ui.TextField(
y=y,
width=50,
height=CONTROLS_HEIGHT,
keyboard_type=ui.KEYBOARD_NUMBER_PAD,
placeholder="0",
delegate=self)
self.generated_label2 = ui.Label(
text=" min")
self.generated_label2.size_to_fit()
self.generated_label2.y = (
y + (CONTROLS_HEIGHT - self.generated_label1.height) / 2)
y += CONTROLS_HEIGHT + CONTROLS_VERTICAL_MARGIN
self.start_label1 = ui.Label()
self.start_textfield = ui.TextField(
y=y,
width=50,
height=CONTROLS_HEIGHT,
keyboard_type=ui.KEYBOARD_NUMBER_PAD,
placeholder="0")
self.start_label2 = ui.Label(
text=" min")
self.start_label2.size_to_fit()
self.start_label2.y = y + (CONTROLS_HEIGHT - self.start_label2.height) / 2
y += CONTROLS_HEIGHT + CONTROLS_VERTICAL_MARGIN
self.separator_line = ui.View(
y=y,
height=1,
border_width=1,
border_color=SEPARATOR_COLOR)
y += 1
self.tableview = ui.TableView(
y=y,
data_source=self,
delegate=self)
self.busy = BusyView()
for view in [self.reason_selector, self.start_label1,
self.start_textfield, self.start_label2,
self.generated_label1, self.generated_textfield,
self.generated_label2, self.separator_line,
self.tableview, self.busy]:
self.add_subview(view)
def layout(self) -> None:
""" [ui.View] Update position and size of controls. """
inset = self.objc_instance.safeAreaInsets()
width = self.width - inset.left - inset.right
height = self.height - inset.bottom
self.reason_selector.x = (inset.left
+ (width - self.reason_selector.width) / 2)
self.generated_label1.x = self.reason_selector.x
self.generated_textfield.x = (self.generated_label1.x
+ self.generated_label1.width)
self.generated_label2.x = (self.generated_textfield.x
+ self.generated_textfield.width)
self.start_label1.text = (
f"Début de sortie : {self.generated_time().strftime('%Hh%M')} + ")
self.start_label1.size_to_fit()
self.start_label1.x = self.reason_selector.x
self.start_label1.y = (self.start_textfield.y
+ (CONTROLS_HEIGHT - self.start_label1.height) / 2)
self.start_textfield.x = self.start_label1.x + self.start_label1.width
self.start_label2.x = self.start_textfield.x + self.start_textfield.width
self.separator_line.x = inset.left
self.separator_line.width = width
self.tableview.x = inset.left
self.tableview.width = width
self.tableview.height = height - self.tableview.y
def textfield_did_change(self, textfield: ui.TextField) -> None:
""" [ui.TextField] Update `start` field when `generated` field changes. """
self.layout()
def tableview_number_of_rows(self,
tableview: ui.TableView,
section: int) -> int:
""" [ui.TableView] Return number rows = # persons. """
return len(self.persons)
def tableview_cell_for_row(self,
tableview: ui.TableView,
section: int,
row: int) -> ui.TableViewCell:
""" [ui.TableView] Display the first name of a given person. """
cell = ui.TableViewCell()
cell.selectable = False
cell.text_label.text = self.persons[row].first_name
return cell
def tableview_did_select(self,
tableview: ui.TableView,
section: int,
row: int) -> None:
""" [ui.TableView] User selected a person. """
def continuation():
nonlocal self
person = self.persons[row]
start = self.start_time()
pdf_file = generate_certificate_pdf(
person=person,
reason=self.reason_selector.selected_index,
start=start,
generated=self.generated_time())
# Displaying a BusyView is necessary, because generate_certificate_pdf()
# takes more than a fraction of a second to run. However, it does
# not take long enough for the user to "see" BusyView, which seems
# to flash on and off very quickly. The following extra delay is
# only there for the user to have time to "see" BusyView before it
# goes away.
sleep(.3)
self.busy.hide()
weekdays = ['lundi', 'mardi', 'mercredi',
'jeudi', 'vendredi', 'samedi', 'dimanche']
subject = (f"Attestion de déplacement - {weekdays[start.weekday()]} "
f"{start.strftime('%Hh%M')}")
mail_compose(subject=subject,
recipients=[person.email],
filename=pdf_file,
mime_type='application/pdf')
self.busy.show()
Thread(target=continuation).start()
def close_and_exit(self, sender: ui.Button) -> None:
""" [ui.Button] Close the view and exit app. """
self.close()
def generated_time(self) -> datetime:
""" Return the time the certificate was generated, as a datetime. """
return (self.now
- timedelta(minutes=int(self.generated_textfield.text or '0')))
def start_time(self) -> datetime:
""" Return the time the certificate starts, as a datetime. """
return (self.generated_time()
+ timedelta(minutes=int(self.start_textfield.text or '0')))
@dataclass
class Person:
""" Represent a person for whom a certificate can be generated. """
first_name: str
last_name: str
birthdate: str
birthplace: str
address: str
postal_code: str
city: str
email: str
@dataclass
class Field:
""" Represent a field in the PDF certificate file. """
x: float
y: float
scale: float
filename: str = dataclass_field(init=False)
count: ClassVar[int] = 0
def __post_init__(self):
self.filename = f'temp{Field.count}.pdf'
Field.count += 1
@dataclass
class TextField(Field):
""" Represent a text field in the PDF certificate file. """
text: str
font: ImageFont.FreeTypeFont
def __post_init__(self):
super().__post_init__()
w, h = self.font.getsize(self.text)
img = Image.new(mode='1', size=(w, h), color='white')
ImageDraw.Draw(img).text((0, 0), self.text, font=self.font, fill='black')
img.save(self.filename)
@dataclass
class QRCodeField(Field):
""" Represent a QRCode field in the PDF certificate file. """
person: Person
reason: int
start: datetime
generated: datetime
def __post_init__(self):
super().__post_init__()
QRCODE_SIZE_PX = 152 # Good compromise between image size and quality
person = self.person
img = qrcode.make(
f"Cree le: {self.generated.strftime('%d/%m/%Y a %Hh%M')};\n"
f"Nom: {person.last_name};\nPrenom: {person.first_name};\n"
f"Naissance: {person.birthdate} a {person.birthplace};\n"
f"Adresse: {person.address} {person.postal_code} {person.city};\n"
f"Sortie: {self.start.strftime('%d/%m/%Y a %H:%M')};\n"
f"Motifs: {REASONS[self.reason]}"
).resize(size=(QRCODE_SIZE_PX, QRCODE_SIZE_PX))
img.save(self.filename)
def generate_certificate_pdf(person: Person,
reason: int,
start: datetime,
generated: datetime) -> str:
""" Generate the certificate required when leaving the place of confinement.
Arguments
---------
person: :class:`Person`
For whom the certificate is generated.
reason: `int`
For which the person is leaving her place of confinement. This is an
index into ``REASONS``.
start: `datetime`
Date and time the person is leaving her place of confinement.
generated: `datetime`
Date and time the certificate is generated.
Returns
-------
str
Path to the PDF file generated. """
def cm_to_point(cm: float) -> int:
return int(cm / 2.54 * 72)
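# Example: cm_to_point(2.54) == 72, i.e. one inch maps to 72 PDF points.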
if sys.platform == 'ios':
LEGEND_FONT = 'Helvetica'
LEGEND_FONT_SIZE = 24
else:
LEGEND_FONT = 'arial.ttf'
LEGEND_FONT_SIZE = 24
font = ImageFont.truetype(LEGEND_FONT, LEGEND_FONT_SIZE)
fields = [
TextField(text=f"{person.first_name} {person.last_name}",
font=font,
x=3.27, y=24.8, scale=.47),
TextField(text=person.birthdate,
font=font,
x=3.27, y=24.15, scale=.47),
TextField(text=person.birthplace,
font=font,
x=7.55, y=24.15, scale=.47),
TextField(text=f"{person.address} {person.postal_code} {person.city} ",
font=font,
x=3.68, y=23.476, scale=.47),
TextField(text="X" if reason == 0 else " ",
font=font,
x=1.625, y=19.52, scale=.37),
TextField(text="X" if reason == 1 else " ",
font=font,
x=1.625, y=17.02, scale=.37),
TextField(text="X" if reason == 2 else " ",
font=font,
x=1.625, y=15.32, scale=.37),
TextField(text="X" if reason == 3 else " ",
font=font,
x=1.625, y=14.47, scale=.37),
TextField(text="X" if reason == 4 else " ",
font=font,
x=1.66, y=12.32, scale=.37),
TextField(text=person.city,
font=font,
x=2.8, y=2.7, scale=.47),
TextField(text=start.strftime('%d/%m/%Y'),
font=font,
x=2.22, y=2.05, scale=.47),
TextField(text=start.strftime('%H:%M'),
font=font,
x=8.02, y=2.05, scale=.47),
QRCodeField(person=person, reason=reason, start=start, generated=generated,
x=15.35, y=.9, scale=.65)
]
def set_position(obj: RectXObj, x: float, y: float, scale: float) -> None:
obj.x = cm_to_point(x)
obj.y = cm_to_point(y)
obj.w *= scale
# Generate page 1
page1_xobj = PageMerge(PdfReader(TEMPLATE_PDF).pages[0])
for field in fields:
pdf = PdfReader(field.filename).pages[0]
page1_xobj.add(pdf)
set_position(page1_xobj[-1], field.x, field.y, field.scale)
if isinstance(field, QRCodeField):
qrcode_page = pdf  # keep a reference; avoid shadowing the imported qrcode module
page1 = page1_xobj.render()
# Generate page 2
qrcode_xobj = PageMerge().add(qrcode_page)
set_position(qrcode_xobj[0], 1.3, 16.9, 2.15)
page2 = qrcode_xobj.render()
page2.MediaBox = page1.MediaBox
# Generate certificate document
PdfWriter().addpages([page1, page2]).write(CERTIFICATE_PDF)
# Remove temporary files.
for field in fields:
Path(field.filename).unlink()
return CERTIFICATE_PDF
if __name__ == '__main__':
with open(PERSONS_FILE) as file:
persons = [Person(*person_entry) for person_entry in json.load(file)]
if sys.platform == 'ios':
PersonsView(persons).present()
else:
pdf_file = generate_certificate_pdf(
person=persons[0],
reason=0,
start=datetime.now(),
generated=datetime.now())
|
threading.py
|
"""Functionality for working multi-threaded code."""
import inspect
import logging
import threading
import time
from typing import Dict, Tuple
from .._internal.trace import trace_str
class DeadlockMonitor:
"""A monitor for deadlocked LoggingLocks."""
timeout_sec: float
sleep_sec: float
locks: Dict[int, Tuple[float, int, str, str]]
_lock: threading.Lock
_thread: threading.Thread
def __init__(self, timeout_sec: float, sleep_sec: float):
self.timeout_sec = timeout_sec
self.sleep_sec = sleep_sec
self.locks = {}
self._lock = threading.Lock()
self._thread = threading.Thread(name="DeadlockMonitor", target=self._run, daemon=True)
self._thread.start()
setattr(self, "deadlock_monitor_thread", self._thread)
def _run(self):
while True:
self._check_for_deadlocks()
time.sleep(self.sleep_sec)
def _check_for_deadlocks(self):
with self._lock:
t = time.time()
for k, v in self.locks.items():
if t - v[0] > self.timeout_sec:
self._log_deadlock()
return
def _log_deadlock(self):
t = time.time()
msg = "A likely deadlock was detected! Please create an issue at https://github.com/deephaven-examples/deephaven-ib/issues containing this error message\nOpen locks:\n"
for k, v in self.locks.items():
msg += f"age_sec={t-v[0]} lock_id={v[1]} name={v[2]}\n"
msg += "\n\nStacks:\n\n"
for k, v in self.locks.items():
msg += f"age_sec={t-v[0]} lock_id={v[1]} name={v[2]}\n{v[3]}\n"
logging.error(msg)
def acquire(self, lock_id: int, name: str, stack: str) -> None:
with self._lock:
self.locks[lock_id] = (time.time(), lock_id, name, stack)
def release(self, lock_id: int):
with self._lock:
# pop is used here instead of del, because there are instances where the locks are released multiple times
self.locks.pop(lock_id, None)
_lock_id: int = 0
_lock: threading.Lock = threading.Lock()
_deadlock_monitor: DeadlockMonitor = DeadlockMonitor(3 * 60.0, 10.0)
def _next_lock_id() -> int:
global _lock_id
with _lock:
_lock_id += 1
return _lock_id
class LoggingLock(object):
"""A threading lock that logs lock acquisition and release."""
name: str
log_stack: bool
def __init__(self, name: str, lock=None, log_level=logging.DEBUG, log_stack: bool = False):
if lock is None:
lock = threading.Lock()
self.name = str(name)
self.log_level = log_level
self.lock = lock
self.log_stack = log_stack
self.id = _next_lock_id()
self._log(f"{inspect.stack()[1][3]} created {self.name}")
def _log(self, msg: str) -> None:
if self.log_stack:
msg = f"{msg}: lock_id={self.id} thread_id={threading.get_ident()}\n{trace_str()}"
else:
msg = f"{msg}: lock_id={self.id} thread_id={threading.get_ident()}"
logging.log(self.log_level, msg)
def acquire(self, blocking=True):
self._log(f"{inspect.stack()[1][3]} trying to acquire {self.name}")
if _deadlock_monitor:
_deadlock_monitor.acquire(self.id, self.name, trace_str())
ret = self.lock.acquire(blocking)
if ret:
self._log(f"{inspect.stack()[1][3]} acquired {self.name}")
else:
self._log(f"{inspect.stack()[1][3]} non-blocking acquire of {self.name} lock failed")
return ret
def release(self):
self._log(f"{inspect.stack()[1][3]} releasing {self.name}")
if _deadlock_monitor:
_deadlock_monitor.release(self.id)
self.lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
return False # returning True would swallow exceptions; False lets them propagate
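# Minimal usage sketch (illustrative; the names below are hypothetical, not part of this
# module): wrap a plain lock so every acquire/release is logged, and locks held longer
# than the timeout show up in the DeadlockMonitor report.
#
#     state_lock = LoggingLock("state", log_level=logging.INFO)
#     with state_lock:
#         shared_state["count"] = shared_state.get("count", 0) + 1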
|
default_sound.py
|
from multiprocessing import Process, Queue
from threading import Lock
import numpy as np
import pyaudio as pa
import soundfile as sf
audio = pa.PyAudio()
sound_lock = Lock()
class Microphone:
FORMAT = pa.paFloat32
CHANNELS = 2
RATE = 44100
CHUNK = 1024
def __init__(self, audio=audio):
r"""Initialize the microphone
Args:
audio (PyAudio): Audio class instance for opening streams.
"""
self.stream = audio.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
self.queue = Queue()
self.freq_ind = np.arange(self.CHUNK) / (self.CHUNK / self.RATE)
def run(self):
r"""Main reading loop."""
while True: self.queue.put(self.stream.read(self.CHUNK))
def get(self):
r"""Get next data.
Returns:
numpy.ndarray: Data samples of size `self.CHUNK`.
"""
return self.queue.get()
def get_processed(self):
r"""Get processed data.
1. Data is multiplied by a Hamming window.
2. Data is processed with an FFT.
3. Data is presented as absolute value of FFT.
Returns:
Tuple[numpy.ndarray, numpy.ndarray]:
1. Processed data with FFT.
2. Table of frequencies.
"""
return np.abs(np.fft.fft(self.get() * np.hamming(self.CHUNK))), self.freq_ind
def close_audio(*args, **kwargs):
r"""Terminate all audio devices.
Args:
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
global audio  # 'audio' is rebound to None below, so it must be declared global
if audio is None: return
sound_lock.acquire()
audio.terminate()
audio = None
sound_lock.release()
def start_recording(mic, *args, **kwargs):
r"""Start recording in background thread.
Args:
mic (Microphone): Instance of microphone to record from.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
mic.run()
def play_sound(file_name, *args, **kwargs):
r"""Method to play a sound from a file in blocking mode.
Args:
file_name (str): Path and file name to sound file to play.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
if audio is None: return
sound_lock.acquire()
sound_data, rate = sf.read(file_name, dtype='float32')
stream = audio.open(format=pa.paFloat32, channels=sound_data.shape[1], rate=rate, output=True)
stream.write(sound_data.tobytes())
stream.stop_stream()
stream.close()
sound_lock.release()
def play_sound_background(file_name, *args, **kwargs):
r"""Play sound on a thread.
Args:
file_name (str): Path and file name to sound to play.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
play_sound_thread = Process(target=play_sound, args=(file_name,))
play_sound_thread.start()
play_sound_thread.join()
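# Minimal usage sketch (illustrative wiring; start_recording() blocks, so it is meant to
# run in a background worker): get_processed() then yields one Hamming-windowed FFT
# magnitude spectrum per captured chunk.
#
#     mic = Microphone()
#     Process(target=start_recording, args=(mic,)).start()
#     spectrum, freqs = mic.get_processed()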
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
tag2folder.py
|
from mutagen.flac import FLAC
from mutagen.mp3 import MP3
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
from shutil import move
import itertools
import os
import re
import sys
import threading
import time
def loading():
for s in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + s)
sys.stdout.flush()
time.sleep(0.1)
done = False
ext = [".mp3", ".ogg", ".opus", ".flac"]
extVorbis = [".ogg", ".opus", ".flac"]
oops = False
owd = os.getcwd()
if os.path.isfile("directory.ini"):
musicDir = open('directory.ini').read()
else:
musicDir = owd + "/music"
if not os.path.exists("music"):
os.makedirs("music")
g = threading.Thread(target=loading)
g.start()
for dname, dirs, files in os.walk(musicDir):
for fname in files:
fpath = os.path.join(dname, fname)
if fname.endswith(tuple(ext)):  # cover every supported extension, including .mp3
if fpath.endswith(".flac"):
try:
tags = FLAC(fpath)
except:
oops = True
if fpath.endswith(".mp3"):
try:
tags = MP3(fpath)
except:
oops = True
if fpath.endswith(".ogg"):
try:
tags = OggVorbis(fpath)
except:
oops = True
if fname.endswith(".opus"):
try:
tags = OggOpus(fpath)
except:
oops = True
try:
tags = tags.pprint()
try:
artistStart = tags.index('artist=')
except:
try:
artistStart = tags.index('ARTIST=')
except:
artistStart = tags.index('Artist=')
loose = tags[artistStart+7:]
try:
stopPoint = loose.index('\n')
except:
print("")
artist = loose[:stopPoint]
try:
albumStart = tags.index('album=')
except:
try:
albumStart = tags.index('ALBUM=')
except:
albumStart = tags.index('Album=')
loose = tags[albumStart+6:]
try:
stopPoint = loose.index('\n')
except:
print("")
album = loose[:stopPoint]
artistStrip = re.sub('[!@#$:;]', '', artist)
albumStrip = re.sub('[!@#$:;]', '', album)
albumFolder = musicDir + "\\" + artistStrip + "\\" + albumStrip + "\\"
try:
os.makedirs(albumFolder)
except:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
except:
oops = True
try:
move(musicDir + "\\" + fname, musicDir + "\\" + artistStrip + "\\" + albumStrip + "\\" + fname)
except:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
if oops == True:
print("Something went wrong with some file names... nothing was lost though!")
done = True
|
QUBOValues.py
|
import math
import random
import multiprocessing
from os import pardir
from time import time
from pprint import pformat, pprint
from numba.core.types.containers import List
from numpy import array
from utils import (
get_variables_combinations,
get_variables_combinations_with_replacement,
get_variables_permutations,
split_equal_part
)
from numba.typed import List
from numba import njit, jit
# MANAGER = multiprocessing.Manager()
def getValues(filename, alpha="1/(ri*qi)"):
examples, n, states = getExamples(filename)
# calculation of parentSets
parentSets = calcParentSets(n)
# calculation of w
w = calcW(n, parentSets, states, examples, alpha=alpha)
# calculation of delta
delta = calcDelta(n, parentSets, w)
# calculation of deltaMax
deltaMax = calcDeltaMax(n, delta)
# calculation of deltaTrans
deltaTrans = calcDeltaTrans(n, delta)
# calculation of deltaConsist
deltaConsist = calcDeltaConsist(n, deltaTrans)
return n, parentSets, w, deltaMax, deltaTrans, deltaConsist
def fill_examples(shared_list, examples, subprob, n):
for ex in examples:
shared_list.append([ex[i] for i in subprob])
def getValuesForSubproblems(
filename,
alpha="1/(ri*qi)",
sub_problem_cardinality=3,
subproblems_creation_method=get_variables_permutations,
):
EXAMPLES, N, STATES = getExamples(filename)
# random.shuffle(EXAMPLES)
# Examples_dict = {}
# for i in range(N):
# Examples_dict[i] = []
# for i in range(len(EXAMPLES)):
# for j in range(len(EXAMPLES[i])):
# Examples_dict[j].append(EXAMPLES[i][j])
subProblemsData = {}
subProblemsColIndexes = subproblems_creation_method(N, sub_problem_cardinality)
# subProblemsColIndexes = subProblemsColIndexes[:1] # take only the first subproblem, for testing
# breakpoint()
# MultiPro
EXAMPLES_LENGHT = len(EXAMPLES)
# parts = split_equal_part(EXAMPLES_LENGHT, 2)
# split Examples
# examples_split = [EXAMPLES[i[0]:i[1]] for i in parts]
# The problem is the shared list
for subprob in subProblemsColIndexes:
print(f"Doing {subprob}\n")
subProblemsData[subprob] = {}
subProblemsData[subprob]["n"] = sub_problem_cardinality
subProblemsData[subprob]["states"] = array([STATES[i] for i in subprob])
#multiprocess test
# l1 = []
# l2 = []
# proc1 = multiprocessing.Process(target=fill_examples, args=[l1, examples_split[0], subprob, 0])
# proc2 = multiprocessing.Process(target= fill_examples, args=[l2, examples_split[1], subprob, 1])
# proc1.start()
# proc2.start()
# proc1.join()
# proc2.join()
# subProblemsData[subprob]["examples"] = list(itertools.chain(l1,l2))
# EXAMPLES[parts[i][0]:parts[i][1]] - arrange to compute this only once
#Normal way
subProblemsData[subprob]["examples"] = array(filter_example(subprob, EXAMPLES, EXAMPLES_LENGHT))
# subProblemsData[subprob]["examples"] = []
# for example in EXAMPLES:
# subProblemsData[subprob]["examples"].append([example[i] for i in subprob])
#numpy test(Failed)
# subProblemsData[subprob]["examples"] = array([Examples_dict[i] for i in subprob]).T.tolist()
# pprint(subProblemsData[subprob]["examples"])
# breakpoint()
subProblemsData[subprob]["parentSets"] = calcParentSets(
subProblemsData[subprob]["n"]
)
subProblemsData[subprob]["w"] = calcW(
subProblemsData[subprob]["n"],
subProblemsData[subprob]["parentSets"],
subProblemsData[subprob]["states"],
subProblemsData[subprob]["examples"],
alpha=alpha,
)
# del subProblemsData[subprob]["examples"]
subProblemsData[subprob]["delta"] = calcDelta(
subProblemsData[subprob]["n"],
subProblemsData[subprob]["parentSets"],
subProblemsData[subprob]["w"],
)
subProblemsData[subprob]["deltaMax"] = calcDeltaMax(
subProblemsData[subprob]["n"], subProblemsData[subprob]["delta"]
)
subProblemsData[subprob]["deltaTrans"] = calcDeltaTrans(
subProblemsData[subprob]["n"], subProblemsData[subprob]["delta"]
)
subProblemsData[subprob]["deltaConsist"] = calcDeltaConsist(
subProblemsData[subprob]["n"], subProblemsData[subprob]["deltaTrans"]
)
return subProblemsData, N
@jit(nopython= True)
def filter_example(subprob, Examples, lenght):
res = []
for i in range(lenght):
sub_res = []
for j in range(len(subprob)):
sub_res.append(Examples[i][subprob[j]])
res.append(sub_res)
return res
def getExamples(filename):
with open(filename, "r") as f:
lines = f.read().splitlines()
# get info from first line
info = lines.pop(0).split(" ")
n = int(info[0])
# number of states for each variable
states = [int(si) for si in info[1 : n + 1]]
# get examples
# examples = []
examples = []
for i in range(len(lines)):
ls = lines[i].split(" ")
l2 = []
for j in range(len(ls)):
l2.append(int(ls[j]))
examples.append(l2)
# examples += [[int(x) for x in ls]]
return array(examples), n, array(states)
# subsets with max size m=2
def calcSubSets(s):
sSet = [()]
# all individuals
sSet += [(i,) for i in s]
# all unordered pairs
sSet += [(i, j) for i in s for j in s if i < j]
return sSet
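# Example: calcSubSets((0, 1, 2)) returns
# [(), (0,), (1,), (2,), (0, 1), (0, 2), (1, 2)]
# i.e. the empty set, every singleton and every unordered pair.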
def calcParentSets(n):
parentSets = []
for i in range(n):
maxSet = tuple(x for x in range(0, n) if x != i)
parentSets.append(calcSubSets(maxSet))
return parentSets
def calcAlpha(n, states, parentSets, N, alpha="1/(ri*qi)"):
if alpha == "1/(ri*qi)":
def ca(N, ri, qi):
return 1 / (ri * qi)
elif alpha == "1":
def ca(N, ri, qi):
return 1
elif alpha == "N/(ri*qi)":
def ca(N, ri, qi):
return N / (ri * qi)
elif alpha == "1/ri":
def ca(N, ri, qi):
return 1 / ri
alpha = []
for i in range(n):
alpha.append({})
# for each valid parent set
for parentSet in parentSets[i]:
# generate alpha
alpha[i][parentSet] = []
qi = calcQi(parentSet, states)
for j in range(qi):
alpha[i][parentSet].append([])
for k in range(states[i]):
alpha[i][parentSet][j].append([])
# initialize alpha according to the choice
alpha[i][parentSet][j][k] = ca(N, states[i], qi)
return alpha
def calcAlphaijSum(alpha, parentSet, i, j, states):
sum = 0
for alphaijk in alpha[i][parentSet][j]:
sum += alphaijk
return sum
@jit(nopython= True)
def calcJthState(j, parentSet, states):
# ASSUMPTION: all the variables have the same number of states,
# if this is false, some combinations will be ignored
p0 = parentSet[0]
# j = (states[p0]^1)*sp0 + (states[p1]^0)*sp1
# j = states[p0]*sp0 + sp1
sp0 = j // states[p0]
sp1 = j % states[p0]
return sp0, sp1
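# Worked example (illustrative): with states[p0] = 3 and j = 7, sp0 = 7 // 3 = 2 and
# sp1 = 7 % 3 = 1, matching the encoding j = states[p0] * sp0 + sp1 = 3 * 2 + 1.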
@jit(nopython= True)
def calcNijk(examples, parentSet, i, j, k, states):
count = 0
for example in examples:
# variable i is in the k-th state
if example[i] == k:
# parent set is in the j-th state
if len(parentSet) == 0:
# empty parent set -> only has one state j=0
if j == 0:
count = count + 1
elif len(parentSet) == 1:
# one variable -> is that variable in its j-th state?
if example[parentSet[0]] == j:
count = count + 1
else:
# parent set has 2 variables
sp0, sp1 = calcJthState(j, parentSet, states)
p0 = parentSet[0]
p1 = parentSet[1]
if example[p0] == sp0 and example[p1] == sp1:
count = count + 1
return count
def calcNijSum(examples, parentSet, i, j, states):
sum = 0
for k in range(states[i]):
sum += calcNijk(examples, parentSet, i, j, k, states)
return sum
def calcQi(parentSet, states):
qi = 1
for j in parentSet:
qi *= states[j]
return qi
def calcSi(i, parentSet, states, alpha, examples):
qi = calcQi(parentSet, states)
sum = 0
for j in range(qi):
alphaij = calcAlphaijSum(alpha, parentSet, i, j, states)
Nij = calcNijSum(examples, parentSet, i, j, states)
sum += math.lgamma(alphaij) - math.lgamma(alphaij + Nij)
for k in range(states[i]):
Nijk = calcNijk(examples, parentSet, i, j, k, states)
sum += math.lgamma(alpha[i][parentSet][j][k] + Nijk) - math.lgamma(
alpha[i][parentSet][j][k]
)
return -sum
def calcWi(i, parentSet, s):
if parentSet == ():
return s[i][()]
elif len(parentSet) == 1:
return s[i][parentSet] - s[i][()]
elif len(parentSet) == 2:
p0 = parentSet[0]
p1 = parentSet[1]
return s[i][parentSet] - s[i][(p0,)] - s[i][(p1,)] + s[i][()]
def calcS(n, states, parentSets, alpha, examples):
s = []
for i in range(n):
s.append({})
# for each valid parent set
for parentSet in parentSets[i]:
# calculate si
s[i][parentSet] = calcSi(i, parentSet, states, alpha, examples)
return s
def calcWFromS(n, parentSets, s):
w = []
for i in range(n):
w.append({})
for parentSet in parentSets[i]:
# calculate wi
w[i][parentSet] = calcWi(i, parentSet, s)
return w
def calcW(n, parentSets, states, examples, alpha="1/(ri*qi)"):
# calculation of alpha
alpha = calcAlpha(n, states, parentSets, len(examples), alpha=alpha)
# calculation of s
s = calcS(n, states, parentSets, alpha, examples)
# print()
# calculation of w
w = calcWFromS(n, parentSets, s)
return w
def calcDeltaji(j, i, w, parentSets, n):
deltaPrimeji = -w[i][(j,)]
# for all parentSets for i, {j,k}
for parentSet in parentSets[i]:
if len(parentSet) == 2:
if j in parentSet:
deltaPrimeji -= min(0, w[i][parentSet])
return max(0, deltaPrimeji)
def getDelta(j, i, delta):
posI = i if i < j else i - 1
return delta[j][posI]
def calcDelta(n, parentSets, w):
delta = []
for j in range(n):
delta.append([])
for i in range(n):
if i != j:
delta[j].append(calcDeltaji(j, i, w, parentSets, n))
return delta
def calcDeltaMax(n, delta):
deltaMax = []
for i in range(n):
maxDelta = 0
for j in range(n):
if i != j:
maxDelta = max(maxDelta, getDelta(j, i, delta))
# +1 to satisfy the > constraint
deltaMax.append(maxDelta + 1)
return deltaMax
def calcDeltaTrans(n, delta):
# calculate max of delta
maxDelta = 0
for j in range(n):
for i in range(n):
if i != j:
maxDelta = max(maxDelta, getDelta(j, i, delta))
# calculate deltaTrans -> they are all the same
# +1 to satisfy the > constraint
deltaTrans = maxDelta + 1
return deltaTrans
def calcDeltaConsist(n, deltaTrans):
# need to calculate max between deltaTrans(i,j,k), but there is only one deltaTrans
# +1 to satisfy the > constraint
deltaConsist = (n - 2) * deltaTrans + 1
return deltaConsist
def main():
pass
if __name__ == "__main__":
main()
|
test_dp_correctness.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.distributed.helper import get_device_count_by_fork
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
try:
gpu_info = subprocess.check_output(
["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
)
gpu_info = gpu_info.decode("ascii").split("\n")[0]
except:
gpu_info = "None"
return gpu_info
def get_cpu_name():
cpu_info = "None"
try:
cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
for line in cpu_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
pass
return cpu_info
def get_xpu_name():
if mge.is_cuda_available():
return get_gpu_name()
else:
return get_cpu_name()
class MnistNet(Module):
def __init__(self, has_bn=True):
super().__init__()
self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
self.pool0 = AvgPool2d(2)
self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
self.pool1 = AvgPool2d(2)
self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
self.fc1 = Linear(500, 10, bias=True)
self.bn0 = None
self.bn1 = None
if has_bn:
self.bn0 = BatchNorm2d(20)
self.bn1 = BatchNorm2d(20)
def forward(self, x):
x = self.conv0(x)
if self.bn0:
x = self.bn0(x)
x = F.relu(x)
x = self.pool0(x)
x = self.conv1(x)
if self.bn1:
x = self.bn1(x)
x = F.relu(x)
x = self.pool1(x)
x = F.flatten(x, 1)
x = self.fc0(x)
x = F.relu(x)
x = self.fc1(x)
return x
def train(data, label, net, opt, gm):
opt.clear_grad()
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
opt.step()
return loss
def update_model(model_path):
"""
Update the dumped model with test cases for new reference values.
The model with pre-trained weights is trained for one iter with the test data attached.
The loss and updated net state dict is dumped.
.. code-block:: python
from test_dp_correctness import update_model
update_model('mnist_model_with_test.mge') # for gpu
update_model('mnist_model_with_test_cpu.mge') # for cpu
"""
net = MnistNet(has_bn=True)
checkpoint = mge.load(model_path)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
data = Tensor(checkpoint["data"], dtype=np.float32)
label = Tensor(checkpoint["label"], dtype=np.int32)
# train() already clears gradients and applies the optimizer step
loss = train(data, label, net=net, opt=opt, gm=gm)
xpu_name = get_xpu_name()
checkpoint.update(
{"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
)
mge.serialization.save(checkpoint, model_path)
def run_test(
model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
"""
Load the model with test cases and run the training for one iter.
The loss and updated weights are compared with reference value to verify the correctness.
Dump a new file with updated result by calling update_model
if you think the test fails due to numerical rounding errors instead of bugs.
Please think twice before you do so.
"""
checkpoint = mge.load(model_path)
data = checkpoint["data"]
label = checkpoint["label"]
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker(rank, max_err):
dist.init_process_group("localhost", port, p_num, rank, rank)
net = MnistNet(has_bn=True)
net.load_state_dict(checkpoint["net_init"])
lr = checkpoint["sgd_lr"]
opt = SGD(net.parameters(), lr=lr)
gm = ad.GradManager().attach(
net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
)
# use same data and label for all gpu's
# such that the result does not depend on number of gpu
data_train = Tensor(data)
label_train = Tensor(label)
loss = train(data_train, label_train, net, opt, gm)
np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
if dist.get_rank():
return
for param, param_ref in zip(
net.state_dict().items(), checkpoint["net_updated"].items()
):
assert param[0] == param_ref[0]
if "bn" in param[0]:
ref = param_ref[1].reshape(param[1].shape)
np.testing.assert_allclose(param[1], ref, atol=max_err)
else:
np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)
procs = []
for rank in range(p_num):
p = mp.Process(target=worker, args=(rank, max_err,))
p.start()
procs.append(p)
for p in procs:
p.join(20)
assert p.exitcode == 0
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 4, reason="need more gpu device")
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dp_correctness():
model_name = "mnist_model_with_test.mge"
model_path = os.path.join(os.path.dirname(__file__), model_name)
set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")
run_test(model_path, False, False, max_err=1e-5)
|
dankMemer.py
|
from colored import fg, attr
import requests
import threading
import time
import random
r = fg(241) # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)
def start():
token = input(f"\n {r2}[{b}?{r2}] Token: ")
channel = input(f" {r2}[{b}?{r2}] Channel Id: ")
def execute_command(command = "", cooldown = 0):
print(f"{r2}[{b}!{r2} Loaded: '{command}' With cooldown of {cooldown} Seconds")
while True:
requests.post(
f"https://discord.com/api/channels/{channel}/messages",
data = {'content': command},
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'Authorization' : token
}
)
print(f"{r2}[{b}+{r2}] '{command}' Ran successfully")
time.sleep(cooldown + random.randint(2, 10))
commands = {
"pls beg" : 45,
"pls hunt" : 40,
"pls fish" : 40,
"pls daily" : 86400
}
print()
for cmd, cooldown in commands.items():
threading.Thread(target = execute_command, kwargs = {"command" : cmd, "cooldown" : cooldown}).start()
time.sleep(5)
|
wsgi.py
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Domen Kozar <domen@dev.si>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Note: This file is very trim now because we've broken wsgi_fileserver, wsgi_proxy, wsgi_jsonrpc, and wsgi_xmlrpc
## into their own libraries which are now distributed on their own and treated as dependencies
import httplib
import copy
import socket
import random
import os
import logging
import threading
import sys
from time import sleep
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
logger = logging.getLogger(__name__)
import windmill
from windmill.server import proxy
from windmill.dep import wsgi_jsonrpc
from windmill.dep import wsgi_xmlrpc
from windmill.dep import wsgi_fileserver
import jsmin
START_DST_PORT = 32000
CURRENT_DST_PORT = [random.randint(32000, 34000)]
def reconstruct_url(environ):
# From WSGI spec, PEP 333
from urllib import quote
url = environ['wsgi.url_scheme']+'://'
if environ.get('HTTP_HOST'): url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += environ.get('SCRIPT_NAME','')
url += environ.get('PATH_INFO','')
# Fix ;arg=value in url
if url.find('%3B') != -1:
url, arg = url.split('%3B', 1)
url = ';'.join([url, arg.replace('%3D', '=')])
# Stick query string back in
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
# Stick it in environ for convenience
environ['reconstructed_url'] = url
return url
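# Example (illustrative environ, not from the codebase): for
#   {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'localhost:4444', 'SCRIPT_NAME': '',
#    'PATH_INFO': '/windmill-serv/start.html', 'QUERY_STRING': 'debug=1'}
# reconstruct_url() returns 'http://localhost:4444/windmill-serv/start.html?debug=1'
# and also stores it under environ['reconstructed_url'].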
HTTPConnection = httplib.HTTPConnection
WindmillProxyApplication = proxy.WindmillProxyApplication
WindmillProxyApplication.ConnectionClass = HTTPConnection
add_namespace = None
class WindmillChooserApplication(object):
"""Application to handle choosing the proper application to handle each request"""
def __init__(self, apps, proxy):
self.namespaces = dict([ (arg.ns, arg) for arg in apps ])
self.proxy = proxy
def add_namespace(self, name, application):
"""Add an application to a specific url namespace in windmill"""
self.namespaces[name] = application
def handler(self, environ, start_response):
"""Windmill app chooser"""
sleep(.2)
reconstruct_url(environ)
for key in self.namespaces:
            if environ['PATH_INFO'].find('/'+key+'/') != -1:
logger.debug('dispatching request %s to %s' % (environ['reconstructed_url'], key))
return self.namespaces[key](environ, start_response)
logger.debug('dispatching request %s to WindmillProxyApplication' % reconstruct_url(environ))
response = self.proxy(environ, start_response)
return response
def __call__(self, environ, start_response):
response = self.handler(environ, start_response)
for x in response:
yield x
class WindmillCompressor(object):
"""Full JavaScript Compression Library"""
js_file_list = [
('lib', 'firebug', 'pi.js',),
('lib', 'firebug', 'firebug-lite.js',),
('lib', 'json2.js',),
('lib', 'browserdetect.js',),
('wm', 'windmill.js',), # fleegix
('lib', 'getXPath.js',),
('lib', 'elementslib.js',),
('lib', 'js-xpath.js',),
('controller', 'controller.js',),
('controller', 'commands.js',),
('controller', 'asserts.js',),
('controller', 'waits.js',), # fleegix
('controller', 'flex.js',),
('wm', 'registry.js',),
('extensions', 'extensions.js',),
('wm', 'utils.js',), # fleegix
('wm', 'ide', 'ui.js',), # fleegix
('wm', 'ide', 'recorder.js',), # fleegix
('wm', 'ide', 'remote.js',), # fleegix
('wm', 'ide', 'dx.js',), # fleegix
('wm', 'ide', 'ax.js',), # fleegix
('wm', 'ide', 'results.js',),
('wm', 'xhr.js',), # fleegix
('wm', 'metrics.js',),
('wm', 'events.js',),
('wm', 'global.js',), # fleegix
('wm', 'jstest.js',), # fleegix
('wm', 'load.js',),
]
def __init__(self, js_path, enabled=True):
self.enabled = enabled
self.js_path = js_path
self.compressed_windmill = None
if enabled:
self._thread = threading.Thread(target=self.compress_file)
self._thread.start()
def compress_file(self):
compressed_windmill = ''
for filename in self.js_file_list:
compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
self.compressed_windmill = compressed_windmill
def __call__(self, environ, start_response):
if not self.enabled:
start_response('404 Not Found', [('Content-Type', 'text/plain',), ('Content-Length', '0',)])
return ['']
# if self.compressed_windmill is None:
# self.compressed_windmill = ''
# for filename in self.js_file_list:
# self.compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
while not self.compressed_windmill:
sleep(.15)
start_response('200 Ok', [('Content-Type', 'application/x-javascript',),
('Content-Length', str(len(self.compressed_windmill)),)])
return [self.compressed_windmill]
def make_windmill_server(http_port=None, js_path=None, compression_enabled=None):
if http_port is None:
http_port = windmill.settings['SERVER_HTTP_PORT']
if js_path is None:
js_path = windmill.settings['JS_PATH']
if compression_enabled is None:
compression_enabled = not windmill.settings['DISABLE_JS_COMPRESS']
# Start up all the convergence objects
import convergence
test_resolution_suite = convergence.TestResolutionSuite()
command_resolution_suite = convergence.CommandResolutionSuite()
queue = convergence.ControllerQueue(command_resolution_suite, test_resolution_suite)
xmlrpc_methods_instance = convergence.XMLRPCMethods(queue, test_resolution_suite, command_resolution_suite)
jsonrpc_methods_instance = convergence.JSONRPCMethods(queue, test_resolution_suite, command_resolution_suite)
# Start up all the wsgi applications
windmill_serv_app = wsgi_fileserver.WSGIFileServerApplication(root_path=js_path, mount_point='/windmill-serv/')
windmill_proxy_app = WindmillProxyApplication()
windmill_xmlrpc_app = wsgi_xmlrpc.WSGIXMLRPCApplication(instance=xmlrpc_methods_instance)
windmill_jsonrpc_app = wsgi_jsonrpc.WSGIJSONRPCApplication(instance=jsonrpc_methods_instance)
windmill_compressor_app = WindmillCompressor(os.path.join(js_path, 'js'), compression_enabled)
windmill_serv_app.ns = 'windmill-serv'
windmill_xmlrpc_app.ns = 'windmill-xmlrpc'
windmill_jsonrpc_app.ns = 'windmill-jsonrpc'
windmill_compressor_app.ns = 'windmill-compressor'
global add_namespace
import https
if windmill.has_ssl:
import certificate
cc = certificate.CertificateCreator()
else:
cc = None
httpd = https.WindmillHTTPServer(('0.0.0.0', http_port),
https.WindmillHTTPRequestHandler, cc,
apps=[windmill_serv_app, windmill_jsonrpc_app,
windmill_xmlrpc_app, windmill_compressor_app],
proxy=https.WindmillHTTPSProxyApplication())
add_namespace = httpd.add_namespace
# Attach some objects to httpd for convenience
httpd.controller_queue = queue
httpd.test_resolution_suite = test_resolution_suite
httpd.command_resolution_suite = command_resolution_suite
httpd.xmlrpc_methods_instance = xmlrpc_methods_instance
httpd.jsonrpc_methods_instance = jsonrpc_methods_instance
return httpd
|
quadcopter.py
|
import numpy as np
import math
import scipy.integrate
import time
import datetime
import threading
class Propeller():
def __init__(self, prop_dia, prop_pitch, thrust_unit='N'):
self.dia = prop_dia
self.pitch = prop_pitch
self.thrust_unit = thrust_unit
self.speed = 0 #RPM
self.thrust = 0
def set_speed(self,speed):
self.speed = speed
# From http://www.electricrcaircraftguy.com/2013/09/propeller-static-dynamic-thrust-equation.html
self.thrust = 4.392e-8 * self.speed * math.pow(self.dia,3.5)/(math.sqrt(self.pitch))
self.thrust = self.thrust*(4.23e-4 * self.speed * self.pitch)
if self.thrust_unit == 'Kg':
self.thrust = self.thrust*0.101972
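# Worked example of the thrust formula above (added, illustrative only): a 10x4.5
# prop at 6000 RPM gives roughly
#   4.392e-8 * 6000 * 10**3.5 / sqrt(4.5)  ~= 0.393
#   0.393 * (4.23e-4 * 6000 * 4.5)         ~= 4.49 N   (~0.46 if thrust_unit == 'Kg')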
class Quadcopter():
# State space representation: [x y z x_dot y_dot z_dot theta phi gamma theta_dot phi_dot gamma_dot]
# From Quadcopter Dynamics, Simulation, and Control by Andrew Gibiansky
def __init__(self,quads,gravity=9.81,b=0.0245):
self.quads = quads
self.g = gravity
self.b = b
self.thread_object = None
self.ode = scipy.integrate.ode(self.state_dot).set_integrator('vode',nsteps=500,method='bdf')
self.time = datetime.datetime.now()
for key in self.quads:
self.quads[key]['state'] = np.zeros(12)
self.quads[key]['state'][0:3] = self.quads[key]['position']
self.quads[key]['state'][6:9] = self.quads[key]['orientation']
self.quads[key]['m1'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m2'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m3'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m4'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
# From Quadrotor Dynamics and Control by Randal Beard
ixx=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(2*self.quads[key]['weight']*self.quads[key]['L']**2)
iyy=ixx
izz=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(4*self.quads[key]['weight']*self.quads[key]['L']**2)
self.quads[key]['I'] = np.array([[ixx,0,0],[0,iyy,0],[0,0,izz]])
self.quads[key]['invI'] = np.linalg.inv(self.quads[key]['I'])
self.run = True
def rotation_matrix(self,angles):
ct = math.cos(angles[0])
cp = math.cos(angles[1])
cg = math.cos(angles[2])
st = math.sin(angles[0])
sp = math.sin(angles[1])
sg = math.sin(angles[2])
R_x = np.array([[1,0,0],[0,ct,-st],[0,st,ct]])
R_y = np.array([[cp,0,sp],[0,1,0],[-sp,0,cp]])
R_z = np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
def wrap_angle(self,val):
return( ( val + np.pi) % (2 * np.pi ) - np.pi )
def state_dot(self, time, state, key):
state_dot = np.zeros(12)
# The velocities(t+1 x_dots equal the t x_dots)
state_dot[0] = self.quads[key]['state'][3]
state_dot[1] = self.quads[key]['state'][4]
state_dot[2] = self.quads[key]['state'][5]
# The acceleration
x_dotdot = np.array([0,0,-self.quads[key]['weight']*self.g]) + \
np.dot(self.rotation_matrix(self.quads[key]['state'][6:9]), \
np.array([0,0,( \
self.quads[key]['m1'].thrust + \
self.quads[key]['m2'].thrust + \
self.quads[key]['m3'].thrust + \
self.quads[key]['m4'].thrust)] \
) \
)/self.quads[key]['weight']
state_dot[3] = x_dotdot[0]
state_dot[4] = x_dotdot[1]
state_dot[5] = x_dotdot[2]
# The angular rates(t+1 theta_dots equal the t theta_dots)
state_dot[6] = self.quads[key]['state'][9]
state_dot[7] = self.quads[key]['state'][10]
state_dot[8] = self.quads[key]['state'][11]
# The angular accelerations
omega = self.quads[key]['state'][9:12]
tau = np.array([self.quads[key]['L']*(self.quads[key]['m1'].thrust-self.quads[key]['m3'].thrust), self.quads[key]['L']*(self.quads[key]['m2'].thrust-self.quads[key]['m4'].thrust), self.b*(self.quads[key]['m1'].thrust-self.quads[key]['m2'].thrust+self.quads[key]['m3'].thrust-self.quads[key]['m4'].thrust)])
omega_dot = np.dot(self.quads[key]['invI'], (tau - np.cross(omega, np.dot(self.quads[key]['I'],omega))))
state_dot[9] = omega_dot[0]
state_dot[10] = omega_dot[1]
state_dot[11] = omega_dot[2]
return state_dot
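    # (Added note, illustrative) state_dot implements the rigid-body model from the
    # references above:
    #   linear:   a = [0, 0, -g] + R(theta, phi, gamma) @ [0, 0, T1+T2+T3+T4] / m
    #   angular:  omega_dot = inv(I) @ (tau - omega x (I @ omega))
    # where tau is built from the per-motor thrusts, arm length L and drag factor b.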
def update(self, dt):
for key in self.quads:
self.ode.set_initial_value(self.quads[key]['state'],0).set_f_params(key)
self.quads[key]['state'] = self.ode.integrate(self.ode.t + dt)
self.quads[key]['state'][6:9] = self.wrap_angle(self.quads[key]['state'][6:9])
self.quads[key]['state'][2] = max(0,self.quads[key]['state'][2])
def set_motor_speeds(self,quad_name,speeds):
self.quads[quad_name]['m1'].set_speed(speeds[0])
self.quads[quad_name]['m2'].set_speed(speeds[1])
self.quads[quad_name]['m3'].set_speed(speeds[2])
self.quads[quad_name]['m4'].set_speed(speeds[3])
def get_position(self,quad_name):
return self.quads[quad_name]['state'][0:3]
def get_linear_rate(self,quad_name):
return self.quads[quad_name]['state'][3:6]
def get_orientation(self,quad_name):
return self.quads[quad_name]['state'][6:9]
def get_angular_rate(self,quad_name):
return self.quads[quad_name]['state'][9:12]
def get_state(self,quad_name):
return self.quads[quad_name]['state']
def set_position(self,quad_name,position):
self.quads[quad_name]['state'][0:3] = position
def set_orientation(self,quad_name,orientation):
self.quads[quad_name]['state'][6:9] = orientation
def get_time(self):
return self.time
def thread_run(self,dt,time_scaling):
rate = time_scaling*dt
last_update = self.time
while(self.run==True):
time.sleep(0)
self.time = datetime.datetime.now()
if (self.time-last_update).total_seconds() > rate:
self.update(dt)
last_update = self.time
def start_thread(self,dt=0.002,time_scaling=1):
self.thread_object = threading.Thread(target=self.thread_run,args=(dt,time_scaling), daemon=True)
self.thread_object.start()
def stop_thread(self):
self.run = False
|
player.py
|
import mido
from mido import MidiFile, MetaMessage
import time
import random
import threading
class SonascaPlayer(object):
_output_port = None
_note_on_queue = {}
_note_adj_factor = 0
_note_adj_counter = 0
_mangle_enabled = False
def __init__(self, note_adj_factor=0, note_adj_frequency=10):
self._output_port = [
port for port in mido.get_output_names()
if 'Synth' in port and 'input' in port][0]
self._note_on_queue = {}
self._note_adj_factor = note_adj_factor
self._note_adj_frequency = note_adj_frequency
        self._note_adj_counter = 0
def _send_notes(self, notes, output):
for note in notes:
if not isinstance(note, MetaMessage):
output.send(note)
def _process_notes(self, notes):
for note in notes:
# channel 9 in mido is channel 10 in midi - rhythm
# we'll leave that alone because the results will
# not really be what we want
if note.channel != 9:
self._mangle_note(note)
def _mangle_note(self, note):
if note.type == 'note_on':
self._note_on_queue[
'%s,%s' % (note.channel, note.note)] = note
if self._should_muck():
note.note += random.randint(
0, self._note_adj_factor)
if note.type == 'note_off':
orig_note_on = self._note_on_queue.pop(
'%s,%s' % (note.channel, note.note), None)
if orig_note_on:
note.note = orig_note_on.note
else:
                print('Note off not matched: %s' % note)
def _should_muck(self):
if self._mangle_enabled:
self._note_adj_counter += 1
if self._note_adj_counter >= self._note_adj_frequency:
self._note_adj_counter = 0
return True
return False
def play(self, filename):
midifile = MidiFile(filename)
with mido.open_output(self._output_port,
autoreset=True) as output_to_synth:
note_cache = []
for message in midifile:
if not isinstance(message, MetaMessage):
note_cache.append(message)
if message.time > 0:
self._process_notes(note_cache)
self._send_notes(note_cache, output_to_synth)
note_cache = []
time.sleep(message.time)
def set_mangle_factor(self, mangle_factor=0):
        print("Mangle factor to be set to %s" % mangle_factor)
if mangle_factor not in range(0, 11):
raise Exception('Mangle factor required between 0 and 10')
if mangle_factor == 0:
self._mangle_enabled = False
else:
self._mangle_enabled = True
self._note_adj_frequency = 11 - mangle_factor
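# (Added note) set_mangle_factor maps factor 1..10 onto _note_adj_frequency
# 10..1, so factor 1 nudges the pitch of roughly 1 in every 10 note_on messages
# and factor 10 nudges every note_on; factor 0 disables mangling entirely.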
if __name__ == '__main__':
sonasca_player = SonascaPlayer(note_adj_factor=4, note_adj_frequency=10)
play_thread = threading.Thread(
target=sonasca_player.play, args=('resources/tubthumping.mid',))
play_thread.start()
for i in range(1, 11):
        print("i = %s" % i)
sonasca_player.set_mangle_factor(mangle_factor=i)
        print("Mangler enabled = %s, note adj frequency to 1 in %s" % (
            sonasca_player._mangle_enabled, sonasca_player._note_adj_frequency))
time.sleep(2)
time.sleep(15)
for i in range(1, 11):
        print("i = %s" % i)
sonasca_player.set_mangle_factor(mangle_factor=(10 - i))
        print("Mangler enabled = %s, note adj frequency to 1 in %s" % (
            sonasca_player._mangle_enabled, sonasca_player._note_adj_frequency))
time.sleep(2)
play_thread.join()
|
traybar.py
|
import os
from .win32_adapter import *
import threading
import uuid
class SysTrayIcon(object):
"""
menu_options: tuple of tuples (menu text, menu icon path or None, function name)
menu text and tray hover text should be Unicode
hover_text length is limited to 128; longer text will be truncated
Can be used as context manager to enable automatic termination of tray
if parent thread is closed:
with SysTrayIcon(icon, hover_text) as systray:
for item in ['item1', 'item2', 'item3']:
systray.update(hover_text=item)
do_something(item)
"""
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 1023
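    # Illustrative usage (added sketch; the handler name is hypothetical): each
    # menu action receives the SysTrayIcon instance when clicked.
    #   def on_hello(systray):
    #       print("hello")
    #   menu_options = (("Say hello", None, on_hello),)
    #   SysTrayIcon("icon.ico", "My app", menu_options).start()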
def __init__(self,
icon,
hover_text,
menu_options=None,
on_quit=None,
default_menu_index=None,
window_class_name=None):
self._icon = icon
self._icon_shared = False
self._hover_text = hover_text
self._on_quit = on_quit
menu_options = menu_options or ()
menu_options = menu_options + (('Quit', None, SysTrayIcon.QUIT),)
self._next_action_id = SysTrayIcon.FIRST_ID
self._menu_actions_by_id = set()
self._menu_options = self._add_ids_to_menu_options(list(menu_options))
self._menu_actions_by_id = dict(self._menu_actions_by_id)
window_class_name = window_class_name or ("SysTrayIconPy-%s" % (str(uuid.uuid4())))
self._default_menu_index = (default_menu_index or 0)
self._window_class_name = encode_for_locale(window_class_name)
self._message_dict = {RegisterWindowMessage("TaskbarCreated"): self._restart,
WM_DESTROY: self._destroy,
WM_CLOSE: self._destroy,
WM_COMMAND: self._command,
WM_USER+20: self._notify}
self._notify_id = None
self._message_loop_thread = None
self._hwnd = None
self._hicon = 0
self._hinst = None
self._window_class = None
self._menu = None
self._register_class()
def __enter__(self):
"""Context manager so SysTray can automatically close"""
self.start()
return self
def __exit__(self, *args):
"""Context manager so SysTray can automatically close"""
self.shutdown()
def WndProc(self, hwnd, msg, wparam, lparam):
hwnd = HANDLE(hwnd)
wparam = WPARAM(wparam)
lparam = LPARAM(lparam)
if msg in self._message_dict:
self._message_dict[msg](hwnd, msg, wparam.value, lparam.value)
return DefWindowProc(hwnd, msg, wparam, lparam)
def _register_class(self):
# Register the Window class.
self._window_class = WNDCLASS()
self._hinst = self._window_class.hInstance = GetModuleHandle(None)
self._window_class.lpszClassName = self._window_class_name
self._window_class.style = CS_VREDRAW | CS_HREDRAW
self._window_class.hCursor = LoadCursor(0, IDC_ARROW)
self._window_class.hbrBackground = COLOR_WINDOW
self._window_class.lpfnWndProc = LPFN_WNDPROC(self.WndProc)
RegisterClass(ctypes.byref(self._window_class))
def _create_window(self):
style = WS_OVERLAPPED | WS_SYSMENU
self._hwnd = CreateWindowEx(0, self._window_class_name,
self._window_class_name,
style,
0,
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
0,
0,
self._hinst,
None)
UpdateWindow(self._hwnd)
self._refresh_icon()
def _message_loop_func(self):
self._create_window()
PumpMessages()
def start(self):
if self._hwnd:
return # already started
self._message_loop_thread = threading.Thread(target=self._message_loop_func)
self._message_loop_thread.start()
def shutdown(self):
if not self._hwnd:
return # not started
PostMessage(self._hwnd, WM_CLOSE, 0, 0)
self._message_loop_thread.join()
def update(self, icon=None, hover_text=None):
""" update icon image and/or hover text """
if icon:
self._icon = icon
self._load_icon()
if hover_text:
self._hover_text = hover_text
self._refresh_icon()
def _add_ids_to_menu_options(self, menu_options):
result = []
for menu_option in menu_options:
option_text, option_icon, option_action = menu_option
if callable(option_action) or option_action in SysTrayIcon.SPECIAL_ACTIONS:
self._menu_actions_by_id.add((self._next_action_id, option_action))
result.append(menu_option + (self._next_action_id,))
elif non_string_iterable(option_action):
result.append((option_text,
option_icon,
self._add_ids_to_menu_options(option_action),
self._next_action_id))
else:
raise Exception('Unknown item', option_text, option_icon, option_action)
self._next_action_id += 1
return result
def _load_icon(self):
# release previous icon, if a custom one was loaded
# note: it's important *not* to release the icon if we loaded the default system icon (with
# the LoadIcon function) - this is why we assign self._hicon only if it was loaded using LoadImage
if not self._icon_shared and self._hicon != 0:
DestroyIcon(self._hicon)
self._hicon = 0
# Try and find a custom icon
hicon = 0
if self._icon is not None and os.path.isfile(self._icon):
icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
icon = encode_for_locale(self._icon)
hicon = self._hicon = LoadImage(0, icon, IMAGE_ICON, 0, 0, icon_flags)
self._icon_shared = False
# Can't find icon file - using default shared icon
if hicon == 0:
self._hicon = LoadIcon(0, IDI_APPLICATION)
self._icon_shared = True
self._icon = None
def _refresh_icon(self):
if self._hwnd is None:
return
if self._hicon == 0:
self._load_icon()
if self._notify_id:
message = NIM_MODIFY
else:
message = NIM_ADD
self._notify_id = NotifyData(self._hwnd,
0,
NIF_ICON | NIF_MESSAGE | NIF_TIP,
WM_USER+20,
self._hicon,
self._hover_text)
Shell_NotifyIcon(message, ctypes.byref(self._notify_id))
def _restart(self, hwnd, msg, wparam, lparam):
self._refresh_icon()
def _destroy(self, hwnd, msg, wparam, lparam):
if self._on_quit:
self._on_quit(self)
nid = NotifyData(self._hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, ctypes.byref(nid))
PostQuitMessage(0) # Terminate the app.
        # TODO * release self._menu with DestroyMenu and reset the member
        #      * release self._hicon with DestroyIcon and reset the member
# * release loaded menu icons (loaded in _load_menu_icon) with DeleteObject
# (we don't keep those objects anywhere now)
self._hwnd = None
self._notify_id = None
def _notify(self, hwnd, msg, wparam, lparam):
if lparam == WM_LBUTTONDBLCLK:
self._execute_menu_option(self._default_menu_index + SysTrayIcon.FIRST_ID)
elif lparam == WM_RBUTTONUP:
self._show_menu()
elif lparam == WM_LBUTTONUP:
pass
return True
def _show_menu(self):
if self._menu is None:
self._menu = CreatePopupMenu()
self._create_menu(self._menu, self._menu_options)
#SetMenuDefaultItem(self._menu, 1000, 0)
pos = POINT()
GetCursorPos(ctypes.byref(pos))
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
SetForegroundWindow(self._hwnd)
TrackPopupMenu(self._menu,
TPM_LEFTALIGN,
pos.x,
pos.y,
0,
self._hwnd,
None)
PostMessage(self._hwnd, WM_NULL, 0, 0)
def _create_menu(self, menu, menu_options):
for option_text, option_icon, option_action, option_id in menu_options[::-1]:
if option_icon:
option_icon = self._prep_menu_icon(option_icon)
if option_id in self._menu_actions_by_id:
item = PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
wID=option_id)
InsertMenuItem(menu, 0, 1, ctypes.byref(item))
else:
submenu = CreatePopupMenu()
self._create_menu(submenu, option_action)
item = PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
hSubMenu=submenu)
InsertMenuItem(menu, 0, 1, ctypes.byref(item))
def _prep_menu_icon(self, icon):
icon = encode_for_locale(icon)
# First load the icon.
ico_x = GetSystemMetrics(SM_CXSMICON)
ico_y = GetSystemMetrics(SM_CYSMICON)
hicon = LoadImage(0, icon, IMAGE_ICON, ico_x, ico_y, LR_LOADFROMFILE)
hdcBitmap = CreateCompatibleDC(None)
hdcScreen = GetDC(None)
hbm = CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
hbmOld = SelectObject(hdcBitmap, hbm)
# Fill the background.
brush = GetSysColorBrush(COLOR_MENU)
FillRect(hdcBitmap, ctypes.byref(RECT(0, 0, 16, 16)), brush)
# draw the icon
DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, DI_NORMAL)
SelectObject(hdcBitmap, hbmOld)
# No need to free the brush
DeleteDC(hdcBitmap)
DestroyIcon(hicon)
return hbm
def _command(self, hwnd, msg, wparam, lparam):
id = LOWORD(wparam)
self._execute_menu_option(id)
def _execute_menu_option(self, id):
menu_action = self._menu_actions_by_id[id]
if menu_action == SysTrayIcon.QUIT:
DestroyWindow(self._hwnd)
else:
menu_action(self)
def non_string_iterable(obj):
try:
iter(obj)
except TypeError:
return False
else:
return not isinstance(obj, str)
|
controllers.py
|
from ...db.settings import db, oidc, socketio, mongoclient
from flask import Flask, Blueprint, render_template, abort, g, request, jsonify, redirect
from oauth2client.client import OAuth2Credentials
from flask_socketio import emit, join_room, leave_room, send
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt, set_access_cookies, get_jti,
set_refresh_cookies, unset_jwt_cookies, decode_token, get_jwt_claims, verify_fresh_jwt_in_request, verify_jwt_in_request)
from ...repository.AdminPanelRepository import AdminPanelRepository
from ...repository.LocationRepository import LocationRepository
from ...repository.IssueRepository import IssueRepository
from functools import wraps
import json
import threading
import time
from bson import json_util
from twilio.rest import Client
from ..editor.controllers import twilio_account_sid
twilio_auth_token = "a03a300a56a986d5a49badc4a35842d7"
panel = Blueprint("adminpanel", __name__,
static_folder="static", template_folder="templates")
adminPanel = AdminPanelRepository(testing=False)
locationRepo = LocationRepository(testing=False)
issueRepo = IssueRepository(testing=False)
usersConnected = dict()
client = Client(twilio_account_sid, twilio_auth_token)
"""
HEADER FOR TRACKING REQUESTS
"""
from decorators import addBluePrint
addBluePrint("admin", panel)
def admin_required():
def wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
verify_jwt_in_request()
claims = get_raw_jwt()
print("verifying if admin....")
print("CLAIMS")
print(claims)
if claims["user_claims"]["is_administrator"]:
print("USER IS ADMIN")
return fn(*args, **kwargs)
else:
print("USER IS NO ADMIN")
return json.dumps({"permission_denied": True}), 801
return decorator
return wrapper
@panel.route('/', methods=["GET"])
def index():
return render_template('/admindashboard/index.html')
@panel.route('/getTotalUsers', methods=["POST"])
@jwt_required
@admin_required()
def getTotalUsersRoute():
claims = get_jwt_claims()
print("CLAIMS!")
print(claims)
data = request.form
start = data["start"]
end = data["end"]
return adminPanel.getUserCount(start=start, end=end)
@panel.route('/getTotalInteractions', methods=["POST"])
@jwt_required
@admin_required()
def getTotalInteractionsRoute():
data = request.form
start = data["start"]
end = data["end"]
return adminPanel.getInteractions(start=start, end=end)
@panel.route('/getIssues', methods=["POST"])
@jwt_required
@admin_required()
def getTotalIssuesRoute():
data = request.form
start = data["start"]
end = data["end"]
return issueRepo.getIssues(start=start, end=end)
@panel.route('/getTotalPresentations', methods=["POST"])
@jwt_required
@admin_required()
def getTotalPresentationsRoute():
data = request.form
start = data["start"]
end = data["end"]
return adminPanel.getPresentationCount(start=start, end=end)
@panel.route('/getUserInteractions', methods=["POST"])
@jwt_required
@admin_required()
def getUserInteractions():
data = request.form
start = data["start"]
end = data["end"]
return json.dumps({"res": adminPanel.getUserInteractions(start=start, end=end)})
@panel.route('/getActiveUsersOverTime', methods=["POST"])
@jwt_required
@admin_required()
def getActiveUsersOverTimeRoute():
data = request.form
start = data["start"]
end = data["end"]
return json.dumps({"res": adminPanel.getActiveUsersOverTime(start=start, end=end)})
@panel.route('/getCurrentOnlineUsers', methods=["GET"])
@jwt_required
@admin_required()
def getCurrentOnlineUsersRoute():
return json.dumps({"res": len(usersConnected)})
@panel.route('/getLocation', methods=["POST"])
@jwt_required
@admin_required()
def getLocationDataRoute():
data = request.form
start = data["start"]
end = data["end"]
return json.dumps({"res": locationRepo.getUsersAndLocation(start, end)})
@panel.route('/getLocationFromRequests', methods=["POST"])
@jwt_required
@admin_required()
def getLocationDataFromRequestsRoute():
data = request.form
start = data["start"]
end = data["end"]
return json.dumps({"res": locationRepo.getRequestsAndLocation(start, end)})
@panel.route('/getLocationCount', methods=["POST"])
@jwt_required
@admin_required()
def getLocationWithCountRoute():
data = request.form
start = data["start"]
end = data["end"]
locationData, totalRequest = locationRepo.getUserCountWithLocation(start, end)
return json.dumps({"res": locationData, "total_request": totalRequest})
@panel.route('/getLocationRequestsCount', methods=["POST"])
@jwt_required
@admin_required()
def getLocationRequestsCountRoute():
data = request.form
start = data["start"]
end = data["end"]
locationData, totalRequest = locationRepo.getRequestsCountWithLocation(start, end)
return json.dumps({"res": locationData, "total_request": totalRequest})
@panel.route('/getCreatedTasks', methods=["GET"])
@jwt_required
@admin_required()
def getTodaysTasksRoute():
return json.dumps({"res": adminPanel.getTodaysCreatedTasks()})
@panel.route('/getCreatedPresentations', methods=["GET"])
@jwt_required
@admin_required()
def getTodaysPresentationsRoute():
return json.dumps({"res": adminPanel.getTodaysCreatedPresentations()})
@panel.route('/getCreatedSlides', methods=["GET"])
@jwt_required
@admin_required()
def getTodaysSlidesRoute():
return json.dumps({"res": adminPanel.getTodaysCreatedSlides()})
@panel.route('/getVideoChatInformation', methods=["GET"])
@jwt_required
@admin_required()
def getVideoChatInformationRoute():
records = client.usage.records.today.list(category="group-rooms-participant-minutes")
duration = dict()
for el in records:
if el.category == "group-rooms-participant-minutes":
if el.category not in duration:
duration[el.category] = 0
duration[el.category] += float(el.usage)
return json.dumps({"res": duration})
@socketio.on('connectUser')
def userHasConnected(json):
print("User has connected!!!!")
    if json["user_id"] not in usersConnected:
usersConnected[json["user_id"]] = dict()
usersConnected[json["user_id"]]["session"] = str(request.sid)[::-1]
usersConnected[json["user_id"]]["u_id"] = json["user_id"]
usersConnected[json["user_id"]]["jtime"] = time.time()
mongoclient.db["loginsTracker"].insert_one({"type": "login", "session": request.sid, "user": json["user_id"], "time": time.time()})
usersConnected[json["user_id"]]["sid"] = request.sid
join_room(json["user_id"])
thread = threading.Thread(target=handleConnect, kwargs=dict(user_id=json["user_id"]))
thread.start()
@socketio.on('disconnect')
def userHasDisconnected():
print("User has disconnected!!!")
thread = threading.Thread(target=handleDisconnect, kwargs=dict(user_id=request.sid))
thread.start()
def handleConnect(user_id):
    socketio.emit('notifyOnlineUsers', json_util.dumps({"res": adminPanel.getOnlineUsers(users=usersConnected)}), broadcast=True)
socketio.emit('notifyUserCount', len(usersConnected), broadcast=True)
print("Notifying users!")
def handleDisconnect(user_id):
poppedUser = None
for el in list(usersConnected):
if usersConnected[el]["sid"] == user_id:
poppedUser = usersConnected.pop(el)
if poppedUser is not None:
print("Count:", len(usersConnected))
        socketio.emit('notifyOnlineUsers', json_util.dumps({"res": adminPanel.getOnlineUsers(users=usersConnected)}), broadcast=True)
socketio.emit('notifyUserCount', len(usersConnected), broadcast=True)
print(poppedUser)
mongoclient.db["loginsTracker"].insert_one({"type": "logout", "session": poppedUser["sid"], "user": poppedUser["u_id"], "time": time.time(), "duration": int(round((time.time() - poppedUser["jtime"]) / 60))})
print("Notifying Users!!!!!")
|
fpipe.py
|
"""
Copyright 2021-2021 The jdh99 Authors. All rights reserved.
Pipe operations
Authors: jdh99 <jdh821@163.com>
"""
import tziot.config as config
import tziot.param as param
import socket
import threading
import lagan
import dcompy as dcom
import utzpy as utz
from typing import Callable
PIPE_NET = 0xffff
class _Api:
    # Whether sending is allowed. Prototype: func() -> bool
is_allow_send = None # type: Callable[[], bool]
    # Send. Prototype: func(pipe: int, data: bytearray)
send = None # type: Callable[[int, bytearray], None]
_pipes = dict()
_pipe_num = 0
_socket = None
_observers = list()
def pipe_bind_net(ia: int, pwd: str, ip: str, port: int) -> int:
    """Bind a network pipe. Returns the pipe number on success."""
global _socket
if _socket is not None:
lagan.warn(config.TAG, "already bind pipe net")
return PIPE_NET
config.local_pwd = pwd
_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_socket.bind((ip, port))
_bind(PIPE_NET, ia, _socket_tx, _socket_is_allow_send)
threading.Thread(target=_socket_rx).start()
return PIPE_NET
def _socket_rx():
global _socket
while True:
data, address = _socket.recvfrom(config.FRAME_MAX_LEN)
if len(data) == 0:
continue
lagan.info(config.TAG, 'udp rx:%r len:%d', address, len(data))
lagan.print_hex(config.TAG, lagan.LEVEL_DEBUG, bytearray(data))
pipe_receive(dcom.addr_to_pipe(address[0], address[1]), data)
def _socket_tx(pipe: int, data: bytearray):
ip, port = dcom.pipe_to_addr(pipe)
_socket.sendto(data, (ip, port))
lagan.info(config.TAG, "udp send:ip:%s port:%d len:%d", ip, port, len(data))
lagan.print_hex(config.TAG, lagan.LEVEL_DEBUG, data)
def _socket_is_allow_send() -> bool:
return True
def pipe_receive(pipe: int, data: bytearray):
    """Pipe receive. pipe is the sender's pipe number. If you bound a pipe yourself, call this function when data is received on that pipe."""
_notify_observers(pipe, data)
def _notify_observers(pipe: int, data: bytearray):
global _observers
for v in _observers:
v(pipe, data)
def pipe_bind(ia: int, send, is_allow_send) -> int:
"""
    Bind a pipe. Returns the pipe number on success.
    :param ia: device unicast address
    :param send: send function. Format: func(dst_pipe: int, data: bytearray)
    :param is_allow_send: function that reports whether sending is allowed. Format: func() -> bool
    :return: pipe number
"""
pipe = _get_pipe_num()
_bind(pipe, ia, send, is_allow_send)
return pipe
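# Usage sketch for pipe_bind (added, illustrative; my_send/my_is_allow_send are
# hypothetical user callbacks, e.g. wrapping a serial link):
#   def my_send(dst_pipe: int, data: bytearray):
#       serial_port.write(data)
#   def my_is_allow_send() -> bool:
#       return True
#   pipe = pipe_bind(0x0101, my_send, my_is_allow_send)
#   # frames arriving on the custom link are handed back via pipe_receive(pipe, data)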
def _get_pipe_num() -> int:
global _pipe_num
_pipe_num += 1
return _pipe_num
def _bind(pipe: int, ia: int, send, is_allow_send):
config.local_ia = ia
api = _Api()
api.send = send
api.is_allow_send = is_allow_send
_pipes[pipe] = api
def pipe_is_allow_send(pipe: int) -> bool:
if pipe >= PIPE_NET:
pipe = PIPE_NET
if pipe not in _pipes:
return False
return _pipes[pipe].is_allow_send()
def pipe_send(pipe: int, data: bytearray):
if pipe == 0:
return
if pipe >= PIPE_NET:
if PIPE_NET not in _pipes:
return
v = _pipes[PIPE_NET]
else:
if pipe not in _pipes:
return
        v = _pipes[pipe]
if pipe == PIPE_NET:
if param.parent.ia == utz.IA_INVALID or not param.parent.is_conn:
return
v.send(param.parent.pipe, data)
else:
v.send(pipe, data)
def register_rx_observer(callback):
"""
    Register a receive observer.
    :param callback: callback format: func(pipe: int, data: bytearray)
"""
_observers.append(callback)
|
run.py
|
'''
Module with functions to run subprocesses and manage files
Also point of entry to run over external data
'''
from sys import argv
import sys
import traceback
import time
from datetime import date
import os
import json
from os.path import join as path_join
import shutil
import glob
import subprocess
from tqdm import tqdm
import nibabel as nib
import cv2 as cv
import numpy as np
from utils import viewnii, normalizeMri, file_dialog, alert_dialog, confirm_dialog, error_dialog, chunks
from dataset import mni_adni, default_adni, default_harp, mni_harp, HARP, HARP_CLASSES
from transforms import run_once, mni152reg, MNI_BUFFER_MATRIX_PATH, REGWorker
from get_models import get_models
from multiprocessing import Queue, Process, cpu_count
PRE_REGISTER_VOL_PATH = os.path.normpath('cache/pre_register_vol.nii.gz')
PRE_REGISTER_MASK_PATH = os.path.normpath('cache/pre_register_mask.nii.gz')
INVERSE_MATRIX_PATH = os.path.normpath('cache/invmnibuffer.mat')
TEMP_MASK_PATH = os.path.normpath('cache/mask.nii.gz')
MASKS_FOLDER = "e2dhipseg_masks"
if not os.path.isdir('cache'): os.mkdir('cache')
if not os.path.isdir(MASKS_FOLDER): os.mkdir(MASKS_FOLDER)
def generate_stds():
'''
Walks through folders saving standard deviations (for old experiments without it)
Folder hardcoded, this will run only once
'''
iterator = glob.iglob(os.path.join('/', "home", "diedre", "Dropbox", "anotebook", "models", "**", "*cons.txt"),
recursive=True)
for f in iterator:
dataset = os.path.basename(f).split('0.')
if len(dataset) == 1: # skipping old cons files
continue
print(dataset[0])
print(f)
with open(f, 'r') as fil:
whole_file = fil.read()
split_final_result = whole_file.split("Final result: ")
mean = float(split_final_result[-1])
consensus_list = np.array(json.loads(split_final_result[0])[-1])
std = consensus_list.std()
new_mean = consensus_list.mean()
assert mean == new_mean
print("consensus array: {}".format(consensus_list))
print("saved mean: {} recalculated mean: {}".format(mean, new_mean))
new_path = f.split(".txt")[0] + "_" + "std" + str(std) + ".txt"
with open(new_path, 'a+') as new_fil:
new_fil.write(whole_file)
new_fil.write("std: {}".format(std))
print("---------")
def hippodeep(folder="/home/diedre/git/hippodeep", display=False):
'''
Folder is where to look for .nii.gz files to run hippodeep
'''
with open("/home/diedre/git/diedre/logs/hippodeep_runs_{}.txt".format(time.ctime()), 'w') as logfile:
for f in tqdm(glob.glob(os.path.join(folder, "*.nii.gz"))):
try:
path = os.path.basename(f)[:5]
if path != "bruna" and path != "BRUNA":
                    print("Skipping {}".format(f))
                    continue
print(f)
subprocess.run(["sh", "deepseg3.sh", os.path.basename(f)], stdout=logfile)
if display:
result = nib.load(f[:-7] + "_mask_L.nii.gz").get_fdata() + nib.load(f[:-7] + "_mask_R.nii.gz").get_fdata()
result[result > 1] = 1
viewnii(normalizeMri(nib.load(f).get_fdata()), result)
cv.destroyAllWindows()
except Exception as e:
print("HIPPODEEP FATAL ERROR: {}".format(e))
quit()
def dcm2niix(folder):
'''
Runs external dcm2niix utility on given folder
'''
subprocess.run(["/home/diedre/Downloads/NITRC-mricrogl-Downloads/mricrogl_linux/mricrogl_lx/dcm2niix", folder])
def freesurfer(folder, _format=".nii.gz", ncpu=None):
'''
    Runs FreeSurfer recon-all on a folder
'''
ncpus = cpu_count() if ncpu is None else ncpu
to_process = glob.glob(path_join(folder, "*" + _format))
number_of_jobs = len(to_process)
assert number_of_jobs > 0
print("Detected following volumes to process: {}".format(to_process))
batch_size = number_of_jobs//ncpus
if batch_size == 0:
batch_size = 1
print("Number of available threads: {}, batch size: {}.".format(ncpus, batch_size))
batchs = chunks(to_process, batch_size)
# Initialize workers
workers = [Process(target=freesurfer_worker, args=(batch,)) for batch in batchs]
print("Initialized {} workers for freesurfer processing.".format(len(workers)))
# Start workers
for worker in workers:
worker.start()
print("Started all workers.")
# Wait for workers to finish
for worker in tqdm(workers):
worker.join()
print("All workers done!")
def freesurfer_worker(batch):
'''
    FreeSurfer recon-all on all files in the batch
'''
for vol in batch:
vol_name = os.path.basename(vol)
        print("Transferring input to subjects folder...")
pre_log = open(path_join("logs", vol_name + "_preprocess_freesurfer_worker_log{}.txt".format(str(date.today()))), 'wb')
subprocess.run(["recon-all", "-i", vol, "-s", os.path.basename(vol)], stdout=pre_log)
pre_log.close()
print("Starting recon-all, this might take some hours.")
recon_log = open(path_join("logs", vol_name + "_freesurfer_worker_log{}.txt".format(str(date.today()))), 'wb')
subprocess.run(["recon-all", "-all", "-s", os.path.basename(vol)], stdout=recon_log)
recon_log.close()
def nii2niigz(folder):
'''
Converts nii files in folder to nii.gz
'''
for f in tqdm(glob.iglob(os.path.join(folder, "*.nii"))):
vol = nib.load(f)
nib.save(vol, f + ".gz")
def invert_matrix(hip_path, ref_path, saved_matrix):
'''
Inverts the FSL matrix and returns hip to original state
Returns path of final result
'''
if sys.platform == "win32":
try: # when running frozen with pyInstaller
flirt_executable = sys._MEIPASS+'\\flirt.exe'
convert_xfm_executable = sys._MEIPASS+'\\convert_xfm.exe'
except: # when running normally
flirt_executable = 'flirt.exe'
convert_xfm_executable = 'convert_xfm.exe'
else:
flirt_executable = 'flirt'
convert_xfm_executable = 'convert_xfm'
print("Inverting matrix... {}".format(hip_path))
my_env = os.environ.copy(); my_env["FSLOUTPUTTYPE"] = "NIFTI_GZ" # set FSLOUTPUTTYPE=NIFTI_GZ
subprocess.run([convert_xfm_executable, "-omat", INVERSE_MATRIX_PATH, "-inverse", saved_matrix], env=my_env)
print("Transforming back to original space...")
subprocess.run([flirt_executable, "-in", hip_path, "-ref", ref_path, "-out", "final_buffer.nii.gz", "-init", INVERSE_MATRIX_PATH,
"-applyxfm"], env=my_env)
save_path = os.path.normpath(ref_path + "_voxelcount-{}_e2dhipmask.nii.gz".format(int(nib.load("final_buffer.nii.gz").get_fdata().sum())))
try:
shutil.move("final_buffer.nii.gz", save_path)
os.remove(saved_matrix)
os.remove(INVERSE_MATRIX_PATH)
os.remove(hip_path)
except OSError as oe:
print("Error trying to remove post register matrix:")
        traceback.print_exc()
return
print("Post-Registrations done.")
return save_path
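# For reference (added note), the two subprocess calls above are roughly the
# same as running FSL by hand (paths illustrative):
#   convert_xfm -omat cache/invmnibuffer.mat -inverse <saved_matrix>
#   flirt -in <hip_path> -ref <ref_path> -out final_buffer.nii.gz \
#         -init cache/invmnibuffer.mat -applyxfm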
def adni_iso2mni(source_path=default_adni, save_path=mni_adni):
'''
Register all isometric ADNI volumes to MNI152
'''
print("Registering all adni volumes to mni152")
source_masks_path = os.path.join(source_path, "masks")
for s in tqdm(glob.glob(os.path.join(source_path, "samples", "*.nii.gz"))):
mask_name = os.path.basename(s).split(".nii.gz")[0]
mni152reg(s, os.path.join(source_masks_path, mask_name + ".nii.gz"),
save_path=os.path.join(save_path, "samples", mask_name),
mask_save_path=os.path.join(save_path, "masks", mask_name), remove=False, return_numpy=False)
def register_worker(q, save_path):
while True:
data = q.get()
if data is None:
return
else:
sample, mask, label, name = data
reg_sample, reg_mask = reg_handler(sample, mask=mask, uid=str(os.getpid()))
nib.save(nib.Nifti1Image(reg_sample, affine=None), path_join(save_path, "samples", HARP_CLASSES[label], str(name)))
nib.save(nib.Nifti1Image(reg_mask, affine=None), path_join(save_path, "masks", HARP_CLASSES[label], str(name)))
def harp2mni(source_path=default_harp, save_path=mni_harp):
'''
Register all HARP volumes to MNI152
'''
print("Registering HARP volumes to mni152")
harp = HARP("all", return_label=True, return_harp_id=True)
ncpu = cpu_count() - 1 # leave one thread for main process to fill the queue / general use
queue = Queue(maxsize=2) # less things hanging in queue = less memory usage
ps = []
# Initialize register workers
for i in range(ncpu):
p = Process(target=register_worker, args=(queue, save_path))
ps.append(p)
# Start workers
for p in ps:
p.start()
# Feed queue with all data to be registered
for i in tqdm(range(len(harp))):
sample, mask, label, harp_id = harp[i]
queue.put((sample, mask, label, "{}.nii.gz".format(harp_id)))
# Tell workers to stop
for i in range(ncpu):
queue.put(None)
# Wait for workers to finish
for p in ps:
p.join()
# Remove left over files
for cache in glob.iglob(path_join("cache", "*.nii.gz")):
try:
print("Deleting {}".format(cache))
os.remove(cache)
except FileNotFoundError:
print("File not found: {}".format(cache))
except Exception as e:
print("Error trying to cleanup cache {}".format(e))
def reg_handler(vol, mask=None, uid=''):
'''
Just calls the right registration processing function
'''
print("Registering input...")
if mask is None:
return reg_pre_post_single(vol, uid=uid)
else:
return reg_pre_post_pair(vol, mask, uid=uid)
def reg_pre_post_pair(vol, mask, uid=''):
'''
Pre and post processing of input volume and mask paths for mni152reg
'''
regworker = REGWorker(uid)
begin = time.time()
if type(vol) == np.ndarray and type(mask) == np.ndarray:
volpath = regworker.add_worker_id(PRE_REGISTER_VOL_PATH)
maskpath = regworker.add_worker_id(PRE_REGISTER_MASK_PATH)
nib.save(nib.Nifti1Image(vol, affine=None), volpath)
nib.save(nib.Nifti1Image(mask, affine=None), maskpath)
vol = volpath
mask = maskpath
elif not(type(vol) == str) and not(type(mask) == str):
raise ValueError("vol and mask should be a numpy volume or a path to the volume")
print("Input PATHS -> Vol: {}\nMask: {}".format(vol, mask))
vol, mask = mni152reg(vol, mask=mask, keep_matrix=True, worker_id=uid)
if vol.max() > 1.0 or vol.min() < 0 or mask.max() > 1.0 or mask.min() < 0:
print("WARNING: Data out of range, normalizing...")
vol = normalizeMri(vol.astype(np.float32)).squeeze()
    mask = mask.astype(bool).astype(np.float32).squeeze()
print("Registration took {}s".format(time.time() - begin))
return vol, mask
def reg_pre_post_single(vol, uid=''):
'''
Pre and post processing of input volume for mni152reg
'''
regworker = REGWorker(uid)
begin = time.time()
    # We want vol to be a path, but it may not be one
if type(vol) == np.ndarray:
volpath = regworker.add_worker_id(PRE_REGISTER_VOL_PATH)
nib.save(nib.Nifti1Image(vol, affine=None), volpath)
vol = volpath
elif not(type(vol) == str):
raise ValueError("vol should be a numpy volume or a path to the volume")
print("Input PATH -> Vol: {}".format(vol))
vol = mni152reg(vol, mask=None, keep_matrix=True, worker_id=uid)
if vol.max() > 1.0 or vol.min() < 0:
print("Data out of range, normalizing...")
vol = normalizeMri(vol.astype(np.float32)).squeeze()
print("Registration took {}s".format(time.time() - begin))
return vol
def main(runlist, models, reg, batch_mode, results_dst):
'''
Main loop to run externally
'''
for arg in tqdm(runlist):
try:
print("Processing {}\n".format(arg))
if reg:
# Register
print("Performing MNI152 registration with FSL FLIRT due to -reg arg... This might take a few seconds...")
volpath = arg
arg = reg_handler(arg)
# Segment
vol, mask = run_once(None, models, numpy_input=arg, save=False, addch=True)
# Register back
nib.save(nib.Nifti1Image(mask, affine=None), TEMP_MASK_PATH)
mask_path = invert_matrix(TEMP_MASK_PATH, volpath, MNI_BUFFER_MATRIX_PATH)
else:
print("Not doing pre-registration, using orientation detector instead.")
vol, mask, mask_path = run_once(arg, models, save=True, return_mask_path=True, addch=True)
try:
check_file = os.path.join(results_dst, os.path.basename(mask_path))
print("Saving result to {}".format(check_file))
if os.path.isfile(check_file):
print("WARNING: Results for this volume already exist, overwriting...")
os.remove(check_file)
shutil.move(mask_path, results_dst)
except OSError as oe:
msg = "Error moving results: {}, do you have the right permissions?".format(oe)
print(msg)
error_dialog(msg)
quit()
if "-display" in argv:
print("Displaying results.")
viewnii(vol, mask=mask)
except Exception as e:
traceback.print_exc()
print("Error: {}, make sure your data is ok, and you have proper permissions. Please contact author in "
"https://github.com/dscarmo/e2dhipseg for issues".format(e))
if batch_mode:
print("Trying to continue... There might be errors in following segmentations.")
if __name__ == "__main__":
mask = None
try:
arg = argv[1]
folder = "/home/diedre/git/hippodeep"
if len(argv) >= 4:
mask = argv[3]
if arg != "hippodeep" and len(argv) >= 3:
folder = argv[2]
except IndexError:
arg = "run"
if arg == "harp2mni":
harp2mni()
elif arg == "hippodeep":
        print("Due to author limitations, hippodeep must be run with a terminal in the hippodeep folder, with the files in "
              "the same folder")
hippodeep(folder)
elif arg == "freesurfer":
if "ncpu" in argv:
ncpu = int(argv[-1])
else:
ncpu = None
freesurfer(folder, ncpu=ncpu)
elif arg == "dcm2niix":
dcm2niix(folder)
elif arg == "nii2niigz":
nii2niigz(folder)
elif arg == "mni152reg":
print("Vol: {}\nMask: {}".format(folder, mask))
vol, mask = reg_handler(folder, mask)
viewnii(vol, mask, wait=0)
elif arg == "adni2mni":
adni_iso2mni()
elif arg == "generatestd":
generate_stds()
else:
batch_mode = False
runlist = []
if len(argv) == 1:
            alert_dialog("Please give a nifti volume to run on.")
arg = file_dialog()
if arg is None:
alert_dialog("No volume given, quitting.")
quit()
results_dst = os.path.join(os.path.dirname(arg), MASKS_FOLDER)
os.makedirs(results_dst, exist_ok=True)
print("Results will be in {}\n".format(os.path.join(arg, MASKS_FOLDER)))
reg = confirm_dialog("Do you want to register the volume to MNI152 space? (recommended, can take a few seconds)")
else:
if "-dir" in argv:
                assert os.path.isdir(arg), "folder given with the -dir argument is not a folder!"
results_dst = os.path.join(arg, MASKS_FOLDER)
batch_mode = True
else:
results_dst = os.path.join(os.path.dirname(arg), MASKS_FOLDER)
                assert os.path.isfile(arg), ("File not found. Make sure the path for your nii input volume {} is correct. If it's "
                                             "a directory, use -dir".format(arg))
os.makedirs(results_dst, exist_ok=True)
print("Results will be in {}\n".format(os.path.join(arg, MASKS_FOLDER)))
reg = "-reg" in argv
print("Running pre-saved weights best model in {}".format(arg))
if sys.platform == "win32":
try: weights_path = sys._MEIPASS+'\\weights' # when running frozen with pyInstaller
except: weights_path = 'weights' # when running normally
else: weights_path = 'weights'
models = get_models(False, True, True, False, True, False, dim='2d', model_folder=weights_path, verbose=False,
out_channels=2, apply_sigmoid=False, apply_softmax=True)
if batch_mode:
runlist = glob.glob(os.path.join(arg, "*.nii")) + glob.glob(os.path.join(arg, "*.nii.gz"))
print("Running segmentation on the following files: {}\n".format(runlist))
main(runlist, models, reg, batch_mode, results_dst)
else:
runlist.append(arg)
main(runlist, models, reg, batch_mode, results_dst)
|
nn_sq_train_grpc.py
|
# -*- coding: utf-8 -*-
"""neural_network square.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/tseth92/NeuralNet_Square/blob/master/neural_network_square.ipynb
"""
'''Training a neural network to predict the square of a number'''
import numpy as np
import matplotlib.pyplot as pp
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend
import pickle
import time
import grpc
from flask import request
from flask import Response
from flask import json
from flask_api import FlaskAPI
import nn_sq_pb2_grpc
import nn_sq_pb2
import threading
import multiprocessing
from multiprocessing import Process, Manager
import os
import redis
'''configurations for neural net'''
APP = FlaskAPI(__name__)
'''Creates n_samples random samples. mid_range decides the range: values are
uniformly distributed between -mid_range/2 and +mid_range/2.'''
def get_data(n_samples, mid_range):
X = np.random.random((n_samples,1))*mid_range-(mid_range/2)
    # e.g. if n_samples = 10000 and mid_range = 10, it will create samples
    # between -5 and 5 (both positive and negative)
y = X*X
#print(X)
return X,y
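# Illustrative example (added, not in the original script): get_data(4, 10)
# returns X of shape (4, 1) with values uniformly drawn from [-5, 5) and y = X**2.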
''' creating the neural net model of 1:20:1; relu activation, mse as loss and
adam optimizer'''
def get_model():
model = Sequential()
model.add(Dense(20, input_shape=(1,), activation='relu'))
model.add(Dense(1))
print(model.summary())
model.compile(loss='mse', optimizer='adam')
return model
''' train the model for specified number of epochs, batch_size'''
@APP.route("/trainNNSq", methods=['GET'])
def train_model():
print('in train model')
#return "hello"
manager = Manager()
m_dict = manager.dict()
n_samples = 100000 # number of samples between 0 and mid_range
epochs = 20
batch_size = 1000
mid_range = 10 # range within which data is required
IP = 'nn-sq-predict-svc'
PORT = ':5001'
response = 'Failure'
X,y = get_data(n_samples, mid_range)
#pp.figure(figsize=(10,3))
#pp.plot(X,y,'.')
model = get_model()
validation_split = 0.2
verbose = 1
queue = multiprocessing.Queue()
t1 = Process(target = fit_model, args=(model, X, y, validation_split, epochs, batch_size, verbose, queue, m_dict))
t1.start()
t1.join()
print('going to dump model')
model_name = m_dict['model_name']
print('model_name_received: ', model_name)
#//////////////// GRPC CALL ////////////////#
channel = grpc.insecure_channel(IP+PORT)
#with grpc.insecure_channel(IP+PORT) as channel:
print('in with')
stub = nn_sq_pb2_grpc.NNTrainPredictStub(channel)
pred_request = nn_sq_pb2.NNRequest(operation=2,model_name='nn_sq_'+model_name)
print(pred_request)
def yield_response(pred_request):
print('in yield_response')
for resp in stub.PredictModel(pred_request):
print(resp.progress)
yield str(resp.progress)+'\n'
print('going to call yield_response')
return Response(yield_response(pred_request))
#return 'hello'
def fit_model(model, X, y, validation_split, epochs, batch_size, verbose, queue, m_dict):
print('pid child is: ', os.getpid())
    h = model.fit(X, y, validation_split=validation_split,
                  epochs=epochs,
                  batch_size=batch_size,
                  verbose=verbose)
model_name = str(time.time()).split('.')[0]
print('putting to queue_model_name', model_name)
queue.put('nn_sq_'+model_name)
m_dict['model_name'] = model_name
pickle.dump(model, open('/mnt/nn-disk/nn_sq_'+model_name, 'wb'))
if __name__ == '__main__' :
print('in main')
APP.run(host='0.0.0.0', port=5000)
|
data_flow_system.py
|
#!/usr/bin/python
import urllib
import urllib2
import xml.etree.ElementTree as ET
import exifread
from pymongo import MongoClient
import pymongo.errors
import Queue
import threading
# Get XML list of photos
req = urllib2.Request('https://s3.amazonaws.com/waldo-recruiting')
try: response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print e.reason
except urllib2.URLError as e:
print e.reason
else:
f = response.read()
# TODO Authenticate xml or other security precaution
# create element tree structure list of photos
root = ET.fromstring(f)
ns = '{http://s3.amazonaws.com/doc/2006-03-01/}'
q = Queue.Queue()
for contents in root.findall(ns + 'Contents'):
file_name = contents.find(ns + 'Key').text
if file_name[-3:] in ('jpg', 'JPG', 'tif', 'TIF', 'wav', 'WAV'):
q.put(file_name)
# Inform user a file was not used because it does not have exif info?
#create mongo connection
client = MongoClient()
db = client.test
db.photos.ensure_index('photo_name', unique=True)
# Get photos and parse EXIF info
def worker(queue):
queue_full = True
while queue_full:
try:
file_name = queue.get(False)
req = urllib2.Request('https://s3.amazonaws.com/waldo-recruiting' + '/' + file_name)
try: response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print 'File "' + file_name + '":\n download error: ' + e.reason + '\n'
except urllib2.URLError as e:
print 'File "' + file_name + '":\n download error: ' + e.reason + '\n'
else:
# I couldn't figure out how to open a url with binary transfer - there's got to be a way
                # So I saved locally and reopened - which seems wasteful.
                fh = open('photos/'+file_name, 'wb')
fh.write(response.read())
fh.close()
                f = open('photos/'+file_name, 'rb')
tags = exifread.process_file(f)
picDict = {}
picDict['photo_name'] = file_name
print 'Processing Image: ' + file_name
for tag in tags.keys():
if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):
try:
picDict[unicode(tag).encode('utf-8')] = tags[tag].printable.encode('utf-8')
except Exception as e: # Better error handling is in order here
print 'Encoding error. Tag "' + tag + '" for file "' + file_name + '" not inserted into DB.\n'
pass
# Insert photo info into mongodb
try: result = db.photos.insert_one(picDict)
except pymongo.errors.DuplicateKeyError as e:
print e.message + 'No Insert'
pass
else:
print 'Inserting into DB:'
print result
print '\n'
except Queue.Empty:
queue_full = False
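# (Added sketch, not in the original) exifread only needs a seekable file-like
# object, so the temporary file above could be avoided by keeping the download
# in memory, e.g. (Python 2):
#   import cStringIO
#   tags = exifread.process_file(cStringIO.StringIO(response.read()))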
thread_count = 5
for i in range(thread_count):
t = threading.Thread(target=worker, args = (q,))
t.start()
|
project.py
|
import logging
from threading import Thread
import django_filters
import rest_framework
from rest_framework import filters, mixins, permissions, status
from rest_framework.decorators import action
from rest_framework.exceptions import APIException
from rest_framework.pagination import CursorPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework_extensions.mixins import NestedViewSetMixin
from rest_framework_extensions.routers import ExtendedSimpleRouter
from ..models.github import GitHubRepo, GitHubRepoBusyException
from ..models.project import Project, ProjectFunction
from ..models.scratch import Scratch
from ..serializers import (
ProjectFunctionSerializer,
ProjectSerializer,
ScratchSerializer,
TerseScratchSerializer,
)
logger = logging.getLogger(__name__)
class NotProjectMaintainer(APIException):
status_code = status.HTTP_403_FORBIDDEN
default_detail = "You must be a project maintainer to perform this action."
class ProjectPagination(CursorPagination):
ordering = "-creation_time"
page_size = 20
page_size_query_param = "page_size"
max_page_size = 100
class ProjectFunctionPagination(CursorPagination):
ordering = "-creation_time"
page_size = 20
page_size_query_param = "page_size"
max_page_size = 100
class IsProjectMemberOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
assert isinstance(obj, Project)
return request.method in permissions.SAFE_METHODS or obj.is_member(
request.profile
)
class ProjectViewSet(
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
GenericViewSet,
):
queryset = Project.objects.all()
pagination_class = ProjectPagination
serializer_class = ProjectSerializer
permission_classes = [IsProjectMemberOrReadOnly]
@action(detail=True, methods=["POST"])
def pull(self, request, pk):
project: Project = self.get_object()
repo: GitHubRepo = project.repo
if not project.is_member(request.profile):
raise NotProjectMaintainer()
if not repo.is_pulling:
t = Thread(target=GitHubRepo.pull, args=(project.repo,))
t.start()
repo.is_pulling = True # Respond with is_pulling=True; the thread will save is_pulling=True to the DB
return Response(
ProjectSerializer(project, context={"request": request}).data,
status=status.HTTP_202_ACCEPTED,
)
class ProjectFunctionViewSet(
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet,
):
pagination_class = ProjectFunctionPagination
serializer_class = ProjectFunctionSerializer
filter_fields = ["rom_address", "is_matched_in_repo"]
filter_backends = [
django_filters.rest_framework.DjangoFilterBackend,
filters.SearchFilter,
]
search_fields = ["display_name"]
def get_queryset(self):
return ProjectFunction.objects.filter(project=self.kwargs["parent_lookup_slug"])
@action(detail=True, methods=["GET", "POST"])
def attempts(self, request, **kwargs):
fn: ProjectFunction = self.get_object()
project: Project = fn.project
repo: GitHubRepo = project.repo
if request.method == "GET":
attempts = Scratch.objects.filter(project_function=fn).order_by(
"-last_updated"
)
return Response(
TerseScratchSerializer(
attempts, many=True, context={"request": request}
).data
)
elif request.method == "POST":
if repo.is_pulling:
raise GitHubRepoBusyException()
scratch = fn.create_scratch()
if scratch.is_claimable():
scratch.owner = request.profile
scratch.save()
return Response(
ScratchSerializer(scratch, context={"request": request}).data,
status=status.HTTP_201_CREATED,
)
else:
raise Exception("Unsupported method")
router = ExtendedSimpleRouter(trailing_slash=False)
(
router.register(r"projects", ProjectViewSet).register(
r"functions",
ProjectFunctionViewSet,
basename="projectfunction",
parents_query_lookups=["slug"],
)
)
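# (Added note, illustrative) with trailing_slash=False this nested registration
# exposes roughly: /projects, /projects/{pk} (plus the POST /projects/{pk}/pull
# action) and /projects/{slug}/functions, /projects/{slug}/functions/{pk}
# (plus the GET/POST .../attempts action).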
|
mixins.py
|
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
nstr = str
str = type('')
import inspect
import weakref
from functools import wraps
from threading import Event
from collections import deque
from time import time
try:
from statistics import median
except ImportError:
from .compat import median
from .threads import GPIOThread
from .exc import (
BadEventHandler,
BadWaitTime,
BadQueueLen,
DeviceClosed,
)
class ValuesMixin(object):
"""
Adds a :attr:`values` property to the class which returns an infinite
generator of readings from the :attr:`value` property. There is rarely a
need to use this mixin directly as all base classes in GPIO Zero include
it.
.. note::
Use this mixin *first* in the parent class list.
"""
@property
def values(self):
"""
An infinite iterator of values read from `value`.
"""
while True:
try:
yield self.value
except DeviceClosed:
break
class SourceMixin(object):
"""
Adds a :attr:`source` property to the class which, given an iterable, sets
:attr:`value` to each member of that iterable until it is exhausted. This
mixin is generally included in novel output devices to allow their state to
be driven from another device.
.. note::
Use this mixin *first* in the parent class list.
"""
def __init__(self, *args, **kwargs):
self._source = None
self._source_thread = None
self._source_delay = 0.01
super(SourceMixin, self).__init__(*args, **kwargs)
def close(self):
try:
super(SourceMixin, self).close()
except AttributeError:
pass
self.source = None
def _copy_values(self, source):
for v in source:
self.value = v
if self._source_thread.stopping.wait(self._source_delay):
break
@property
def source_delay(self):
"""
The delay (measured in seconds) in the loop used to read values from
:attr:`source`. Defaults to 0.01 seconds which is generally sufficient
to keep CPU usage to a minimum while providing adequate responsiveness.
"""
return self._source_delay
@source_delay.setter
def source_delay(self, value):
if value < 0:
raise BadWaitTime('source_delay must be 0 or greater')
self._source_delay = float(value)
@property
def source(self):
"""
The iterable to use as a source of values for :attr:`value`.
"""
return self._source
@source.setter
def source(self, value):
if self._source_thread is not None:
self._source_thread.stop()
self._source_thread = None
self._source = value
if value is not None:
self._source_thread = GPIOThread(target=self._copy_values, args=(value,))
self._source_thread.start()
class SharedMixin(object):
"""
This mixin marks a class as "shared". In this case, the meta-class
(GPIOMeta) will use :meth:`_shared_key` to convert the constructor
arguments to an immutable key, and will check whether any existing
instances match that key. If they do, they will be returned by the
constructor instead of a new instance. An internal reference counter is
used to determine how many times an instance has been "constructed" in this
way.
When :meth:`close` is called, an internal reference counter will be
decremented and the instance will only close when it reaches zero.
"""
_INSTANCES = {}
def __del__(self):
self._refs = 0
super(SharedMixin, self).__del__()
@classmethod
def _shared_key(cls, *args, **kwargs):
"""
Given the constructor arguments, returns an immutable key representing
the instance. The default simply assumes all positional arguments are
immutable.
"""
return args
class EventsMixin(object):
"""
Adds edge-detected :meth:`when_activated` and :meth:`when_deactivated`
events to a device based on changes to the :attr:`~Device.is_active`
property common to all devices. Also adds :meth:`wait_for_active` and
:meth:`wait_for_inactive` methods for level-waiting.
.. note::
Note that this mixin provides no means of actually firing its events;
call :meth:`_fire_events` in sub-classes when device state changes to
trigger the events. This should also be called once at the end of
initialization to set initial states.
"""
def __init__(self, *args, **kwargs):
super(EventsMixin, self).__init__(*args, **kwargs)
self._active_event = Event()
self._inactive_event = Event()
self._when_activated = None
self._when_deactivated = None
self._last_state = None
self._last_changed = time()
def wait_for_active(self, timeout=None):
"""
Pause the script until the device is activated, or the timeout is
reached.
:param float timeout:
Number of seconds to wait before proceeding. If this is ``None``
(the default), then wait indefinitely until the device is active.
"""
return self._active_event.wait(timeout)
def wait_for_inactive(self, timeout=None):
"""
Pause the script until the device is deactivated, or the timeout is
reached.
:param float timeout:
Number of seconds to wait before proceeding. If this is ``None``
(the default), then wait indefinitely until the device is inactive.
"""
return self._inactive_event.wait(timeout)
@property
def when_activated(self):
"""
The function to run when the device changes state from inactive to
active.
This can be set to a function which accepts no (mandatory) parameters,
or a Python function which accepts a single mandatory parameter (with
as many optional parameters as you like). If the function accepts a
single mandatory parameter, the device that activated will be passed
as that parameter.
Set this property to ``None`` (the default) to disable the event.
"""
return self._when_activated
@when_activated.setter
def when_activated(self, value):
self._when_activated = self._wrap_callback(value)
@property
def when_deactivated(self):
"""
The function to run when the device changes state from active to
inactive.
This can be set to a function which accepts no (mandatory) parameters,
or a Python function which accepts a single mandatory parameter (with
as many optional parameters as you like). If the function accepts a
single mandatory parameter, the device that deactivated will be
passed as that parameter.
Set this property to ``None`` (the default) to disable the event.
"""
return self._when_deactivated
@when_deactivated.setter
def when_deactivated(self, value):
self._when_deactivated = self._wrap_callback(value)
@property
def active_time(self):
"""
The length of time (in seconds) that the device has been active for.
When the device is inactive, this is ``None``.
"""
if self._active_event.wait(0):
return time() - self._last_changed
else:
return None
@property
def inactive_time(self):
"""
The length of time (in seconds) that the device has been inactive for.
When the device is active, this is ``None``.
"""
if self._inactive_event.wait(0):
return time() - self._last_changed
else:
return None
def _wrap_callback(self, fn):
if fn is None:
return None
elif not callable(fn):
raise BadEventHandler('value must be None or a callable')
elif inspect.isbuiltin(fn):
# We can't introspect the prototype of builtins. In this case we
# assume that the builtin has no (mandatory) parameters; this is
# the most reasonable assumption on the basis that pre-existing
# builtins have no knowledge of gpiozero, and the sole parameter
# we would pass is a gpiozero object
return fn
else:
# Try binding ourselves to the argspec of the provided callable.
# If this works, assume the function is capable of accepting no
# parameters
try:
inspect.getcallargs(fn)
return fn
except TypeError:
try:
# If the above fails, try binding with a single parameter
# (ourselves). If this works, wrap the specified callback
inspect.getcallargs(fn, self)
@wraps(fn)
def wrapper():
return fn(self)
return wrapper
except TypeError:
raise BadEventHandler(
'value must be a callable which accepts up to one '
'mandatory parameter')
def _fire_activated(self):
# These methods are largely here to be overridden by descendents
if self.when_activated:
self.when_activated()
def _fire_deactivated(self):
# These methods are largely here to be overridden by descendents
if self.when_deactivated:
self.when_deactivated()
def _fire_events(self):
old_state = self._last_state
new_state = self._last_state = self.is_active
if old_state is None:
# Initial "indeterminate" state; set events but don't fire
# callbacks as there's not necessarily an edge
if new_state:
self._active_event.set()
else:
self._inactive_event.set()
elif old_state != new_state:
self._last_changed = time()
if new_state:
self._inactive_event.clear()
self._active_event.set()
self._fire_activated()
else:
self._active_event.clear()
self._inactive_event.set()
self._fire_deactivated()
class HoldMixin(EventsMixin):
"""
Extends :class:`EventsMixin` to add the :attr:`when_held` event and the
machinery to fire that event repeatedly (when :attr:`hold_repeat` is
``True``) at intervals defined by :attr:`hold_time`.
"""
def __init__(self, *args, **kwargs):
self._hold_thread = None
super(HoldMixin, self).__init__(*args, **kwargs)
self._when_held = None
self._held_from = None
self._hold_time = 1
self._hold_repeat = False
self._hold_thread = HoldThread(self)
def close(self):
if self._hold_thread:
self._hold_thread.stop()
self._hold_thread = None
try:
super(HoldMixin, self).close()
except AttributeError:
pass
def _fire_activated(self):
super(HoldMixin, self)._fire_activated()
self._hold_thread.holding.set()
def _fire_deactivated(self):
self._held_from = None
super(HoldMixin, self)._fire_deactivated()
def _fire_held(self):
if self.when_held:
self.when_held()
@property
def when_held(self):
"""
The function to run when the device has remained active for
:attr:`hold_time` seconds.
This can be set to a function which accepts no (mandatory) parameters,
or a Python function which accepts a single mandatory parameter (with
as many optional parameters as you like). If the function accepts a
single mandatory parameter, the device that activated will be passed
as that parameter.
Set this property to ``None`` (the default) to disable the event.
"""
return self._when_held
@when_held.setter
def when_held(self, value):
self._when_held = self._wrap_callback(value)
@property
def hold_time(self):
"""
The length of time (in seconds) to wait after the device is activated,
until executing the :attr:`when_held` handler. If :attr:`hold_repeat`
is True, this is also the length of time between invocations of
:attr:`when_held`.
"""
return self._hold_time
@hold_time.setter
def hold_time(self, value):
if value < 0:
raise BadWaitTime('hold_time must be 0 or greater')
self._hold_time = float(value)
@property
def hold_repeat(self):
"""
If ``True``, :attr:`when_held` will be executed repeatedly with
:attr:`hold_time` seconds between each invocation.
"""
return self._hold_repeat
@hold_repeat.setter
def hold_repeat(self, value):
self._hold_repeat = bool(value)
@property
def is_held(self):
"""
When ``True``, the device has been active for at least
:attr:`hold_time` seconds.
"""
return self._held_from is not None
@property
def held_time(self):
"""
The length of time (in seconds) that the device has been held for.
This is counted from the first execution of the :attr:`when_held` event
rather than when the device activated, in contrast to
:attr:`~EventsMixin.active_time`. If the device is not currently held,
this is ``None``.
"""
if self._held_from is not None:
return time() - self._held_from
else:
return None
class HoldThread(GPIOThread):
"""
Extends :class:`GPIOThread`. Provides a background thread that repeatedly
fires the :attr:`HoldMixin.when_held` event as long as the owning
device is active.
"""
def __init__(self, parent):
super(HoldThread, self).__init__(target=self.held, args=(parent,))
self.holding = Event()
self.start()
def held(self, parent):
while not self.stopping.wait(0):
if self.holding.wait(0.1):
self.holding.clear()
while not (
self.stopping.wait(0) or
parent._inactive_event.wait(parent.hold_time)
):
if parent._held_from is None:
parent._held_from = time()
parent._fire_held()
if not parent.hold_repeat:
break
class GPIOQueue(GPIOThread):
"""
Extends :class:`GPIOThread`. Provides a background thread that monitors a
device's values and provides a running *average* (defaults to median) of
those values. If the *parent* device includes the :class:`EventsMixin` in
its ancestry, the thread automatically calls
:meth:`~EventsMixin._fire_events`.
"""
def __init__(
self, parent, queue_len=5, sample_wait=0.0, partial=False,
average=median):
assert callable(average)
super(GPIOQueue, self).__init__(target=self.fill)
if queue_len < 1:
raise BadQueueLen('queue_len must be at least one')
if sample_wait < 0:
raise BadWaitTime('sample_wait must be 0 or greater')
self.queue = deque(maxlen=queue_len)
self.partial = bool(partial)
self.sample_wait = float(sample_wait)
self.full = Event()
self.parent = weakref.proxy(parent)
self.average = average
@property
def value(self):
if not self.partial:
self.full.wait()
try:
return self.average(self.queue)
except ZeroDivisionError:
# No data == inactive value
return 0.0
def fill(self):
try:
while (not self.stopping.wait(self.sample_wait) and
len(self.queue) < self.queue.maxlen):
self.queue.append(self.parent._read())
if self.partial and isinstance(self.parent, EventsMixin):
self.parent._fire_events()
self.full.set()
while not self.stopping.wait(self.sample_wait):
self.queue.append(self.parent._read())
if isinstance(self.parent, EventsMixin):
self.parent._fire_events()
except ReferenceError:
# Parent is dead; time to die!
pass
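# --- Illustrative sketch (not part of gpiozero): a minimal EventsMixin
# subclass following the note in its docstring that sub-classes must call
# _fire_events() whenever their state changes (and once after initialization).
# StubButton is an assumption made up for demonstration; real devices also
# derive from the Device hierarchy.
if __name__ == '__main__':
    class StubButton(EventsMixin, object):
        def __init__(self):
            super(StubButton, self).__init__()
            self.is_active = False
            self._fire_events()      # record the initial (inactive) state

        def press(self):
            self.is_active = True
            self._fire_events()      # inactive -> active edge fires when_activated

        def release(self):
            self.is_active = False
            self._fire_events()      # active -> inactive edge fires when_deactivated

    btn = StubButton()
    btn.when_activated = lambda: print('pressed!')
    btn.when_deactivated = lambda device: print('released by', device)
    btn.press()      # -> "pressed!"
    btn.release()    # -> "released by <StubButton object ...>"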
|
exchange_checker_gtk.py
|
"""
GTK Exchange checker application
"""
import threading
import time
import gi
from tzlocal import get_localzone
gi.require_version("Gtk", "3.0")
gi.require_version("AppIndicator3", "0.1")
# pylint: disable=wrong-import-position
from gi.repository import AppIndicator3 as appindicator, GLib, Gtk as gtk
from exchange_checker import DEFAULT_CHECK_PERIOD_IN_MINUTES, Checker, get_conf
def main():
"""Main gtk app"""
indicator = appindicator.Indicator.new(
"exchangeChecker", "calendar", appindicator.IndicatorCategory.APPLICATION_STATUS
)
indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
indicator.set_title("Exchange Checker")
def create_menu(with_event=True):
"""Replace the app menu each time it is called"""
menu = gtk.Menu()
command_one = gtk.MenuItem("Check now")
command_one.connect("activate", check_now)
menu.append(command_one)
menu.append(gtk.SeparatorMenuItem())
if with_event:
checker.store_next_events()
checker.notify_if_in_less()
for event in checker.next_events:
event_item = gtk.MenuItem(
f"{event['subject']}"
f" in {Checker.remaining_time_text(event)}"
f" at {event['start'].astimezone(get_localzone()).strftime('%H:%M')}"
)
event_item.connect("activate", event_pressed, event)
menu.append(event_item)
menu.append(gtk.SeparatorMenuItem())
exittray = gtk.MenuItem("Exit")
exittray.connect("activate", gtk_quit)
menu.append(exittray)
menu.show_all()
indicator.set_menu(menu)
def check_now(_):
"""Refresh events list"""
checker.store_next_events()
checker.notify_if_in_less()
def event_pressed(_, event):
"""Called when event item is selected in the menu"""
dialog = gtk.MessageDialog(
transient_for=None,
flags=0,
message_type=gtk.MessageType.INFO,
buttons=gtk.ButtonsType.OK,
text=f"{event['subject']}\n",
)
dialog.format_secondary_text(
f"Location: {event['location']}\n\n"
f"Start: {event['start'].astimezone(get_localzone()).strftime('%y-%m-%d %H:%M')}\n\n"
f"Sensitivity: {event['sensitivity']}"
)
dialog.set_icon_from_file("schedule.png")
dialog.run()
dialog.destroy()
def check_loop():
"""Thread look to check and call menu update"""
while True:
GLib.idle_add(create_menu)
time.sleep(DEFAULT_CHECK_PERIOD_IN_MINUTES * 60)
username, password, address = get_conf()
checker = Checker(username, password, address)
thread = threading.Thread(target=check_loop)
thread.daemon = True
thread.start()
create_menu(with_event=False)
gtk.main()
def gtk_quit(_):
"""Quit the application"""
gtk.main_quit()
if __name__ == "__main__":
main()
|
server.py
|
from flask import Flask , render_template , request
from flask import jsonify
import subprocess # nosec #pylint-disable type: ignore
import os
import json_config
import pafy
import vlc
from modules import youtube_videos
from modules import coverpy
from flask_cors import CORS
import threading
app = Flask(__name__)
CORS(app)
Instance = vlc.Instance('--no-video')
player = Instance.media_player_new()
url = ''
youtube = youtube_videos.youtube_results()
coverpy = coverpy.CoverPy()
class search_play_recommend:
def search(self, search_query):
search = youtube.youtube_search(search_query)
art = coverpy.art(search_query)
result = dict([('title', search[0][0]), ('id', search[0][1]), ('album art', art)])
return(result)
def play(self, video_id):
url = 'https://www.youtube.com/watch?v=' + video_id
video = pafy.new(url)
streams = video.audiostreams
best = streams[3]
playurl = best.url
# print(playurl)
return playurl
def recommend(self, video_id):
related_result = youtube.youtube_related(video_id)
print("Realted Resut is", related_result)
items = []
for video in related_result:
print(video)
items.append(dict([('title', video[0]), ('id', video[1]), ('img_url',video[2])]))
return items
song = search_play_recommend()
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/')
def index():
return render_template('index.html')
@app.route('/search', methods=['GET'])
def w_youtube():
search_query = request.args.get('vid')
res = song.search(search_query)
resp = jsonify(res)
resp.status_code = 200
return resp
@app.route('/recommend', methods=['GET'])
def recommended_songs():
video_id = request.args.get('vid')
print(video_id)
recommended = song.recommend(video_id)
print(recommended)
res = {"items" : recommended}
resp = jsonify(res)
resp.status_code = 200
return resp
@app.route('/recommend_carousel', methods=['GET'])
def carousel():
response = youtube.recommended_carousel()
title,vid = [i[0] for i in response] , [i[1] for i in response]
print(title,vid)
display_message = {"titles": title, "videos": vid}
resp = jsonify(display_message)
resp.status_code = 200
return resp
@app.route('/weekly_tops', methods=['GET'])
def weekly_tops():
response = youtube.weekly_top()
title,vid = [i[0] for i in response] , [i[1] for i in response]
print(title,vid)
display_message = {"titles": title, "videos": vid}
resp = jsonify(display_message)
resp.status_code = 200
return resp
@app.route('/play', methods=['GET'])
def wo_youtube():
video_id = request.args.get('vid')
url = song.play(video_id)
print(url)
display_message = {"status":"song started","url":url}
resp = jsonify(display_message)
resp.status_code = 200
return resp
# def runFlaskApp1():
# app.run(host='127.0.0.1', port=7070, debug=True, threaded=True)
if __name__ == '__main__':
# t1 = threading.Thread(target=runFlaskApp1)
# t1.start()
app.run(debug=True)
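# Example requests once the server is running (Flask's default bind is
# http://127.0.0.1:5000; the query values below are placeholders):
#   curl "http://127.0.0.1:5000/search?vid=some+song+name"
#   curl "http://127.0.0.1:5000/recommend?vid=<youtube_video_id>"
#   curl "http://127.0.0.1:5000/play?vid=<youtube_video_id>"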
|
clientMQTT.py
|
from serial import *
import serial.tools.list_ports
import paho.mqtt.client as mqtt
import time
import sys
import os
import threading
## network parameters
# remote
# HOST = '169.254.1.1'
# local
HOST = '127.0.0.1'
PORT = 1883 # default mqtt port
## serial parameters
# windows
WIN = True
LINUX = not(WIN)
if WIN:
SERIALPATH = 'COM10'
SERIAL_ROOT = ''
SERIAL_PREFIX = 'COM'
else:
SERIALPATH = '/dev/ttyACM0'
SERIAL_ROOT = '/dev/'
SERIAL_PREFIX = 'ttyACM'
SLEEP_TIME = 0.01
WATCHDOG_TIMEOUT = 5
## mqtt field variables init
distance = 0
anchor_id = ''
bot_id = ''
rssi = 0
## mqtt format
ROOT = 'SecureLoc/anchors_data/'
COOPERATIVE_ROOT = 'SecureLoc/cooperative_data/'
TOPIC_SERIAL = 'Serial'
MQTT_CONFIG_FILE = 'MQTT_topics.txt'
## SX1280 topics - all the measured data sent by the SX1280 on the serial port
## formatted as topic1_val|topic2_val|...topicN_val#
SX1280_TOPICS = ["master_id", "slave_id", "distance", "time_of_flight", "fei", "rssi"]
NB_DATA = len(SX1280_TOPICS)
## correspondence between SX1280 topics and SecureLoc MQTT topics
MQTT_TOPICS = {
"slave_id":"anchor_id",
"master_id":"bot_id",
"distance":"distance",
"time_of_flight":"t1",
"fei":"t2",
"rssi":"rssi"}
## serial containers & flags
devices = []
connected_anchors = {}
serial_ports = {}
exit_flag = False
class ClientStub(object):
def __getattribute__(self,attr):
def method(*args, **kwargs):
pass
return method
def __setattribute__(self,attr):
pass
## starting mqtt client
# stubing mqtt if no broker is found
try:
mqttc = mqtt.Client()
mqttc.connect(HOST, PORT, 60)
mqttc.loop_start()
except:
mqttc = ClientStub()
def on_message(mqttc,obj,msg):
"""handles data resfreshing when a mqtt message is received"""
labels = msg.topic.split("/")
serial_message = msg.payload
anchorID = labels[-2]
print("Sending serial command to anchor " + anchorID + " :" + serial_message.encode())
for port_name, ID in connected_anchors.items():
if ID == anchorID:
serial_ports[port_name].write(serial_message)
mqttc.on_message = on_message
def getSerialPorts():
"""returns all the serial devices connect as a list"""
ports = []
if SERIAL_PREFIX == 'COM':
# windows system
files = [port.device for port in list(serial.tools.list_ports.comports())]
else:
# linux
files= os.listdir(SERIAL_ROOT)
for entry in files:
if entry.startswith(SERIAL_PREFIX):
ports.append(entry)
print('found serial devices: ' + str(ports) )
return(ports)
def openPort(path = SERIALPATH):
"""open the serial port for the given path"""
try:
port = Serial(path, baudrate = 115200)
except :
print("No serial device on the given path :" + path)
sys.exit()
return(port)
def processLine(line, port):
"""parses the serial line received. If it is a DATA frame, publishes the data on MQTT.
DEBUG frames will be ignored"""
if line and line[0] == '*':
data = line[1:].split("|")
if len(data) < NB_DATA:
print('received frame is not compliant with the expected data format')
if len(data) >= NB_DATA:
anchor_id = data[0]
bot_id = data[1]
port_name = port.name.split("/")[-1]
if (port_name in connected_anchors) and not(connected_anchors[port_name]):
connected_anchors[port_name] = anchor_id
print("subscribing to " + ROOT + anchor_id + "/" + TOPIC_SERIAL )
mqttc.subscribe(ROOT + anchor_id + "/" + TOPIC_SERIAL)
# publishing to MQTT
data_to_publish = {}
for sx_topic, value in zip(SX1280_TOPICS, data):
mqtt_topic = MQTT_TOPICS[sx_topic]
data_to_publish[mqtt_topic] = value
# getting ids
anchor_id = data_to_publish["anchor_id"]
bot_id = data_to_publish["bot_id"]
for topic in [key for key in data_to_publish if (key != "anchor_id" and key != "bot_id")]:
value = data_to_publish[topic]
print(ROOT + str(anchor_id) + "/" + str(bot_id) + "/" + topic, value )
mqttc.publish(ROOT + str(anchor_id) + "/" + str(bot_id) + "/" + topic, value )
def rebootTeensy():
"""reboot the TeensyDuino.
Reflashing Teensyduino is required given that no remote soft reset is provided besides the bootloader"""
print("Resetting Teensy...")
os.system('/home/pi/Desktop/teensy_loader_cli -mmcu=mk20dx256 -s -v ' + '/home/pi/Desktop/node*.hex')
def waitSerialDevice(path):
"""waits until the given serial device is connected"""
print("Waiting for serial device...")
while ( not(os.path.exists(path)) ):
time.sleep(1)
print("Serial device found")
def readSerial(port):
"""reads the given serial port line by line"""
global exit_flag
print('Reading serial port ' + port.name)
exit_flag = False
try:
while True:
if (port.in_waiting > 0 ):
line = port.readline().decode('utf-8').strip()
print(line)
try:
processLine(line, port)
except:
print("process line failed")
except KeyboardInterrupt:
print("Ctrl + C received, quitting")
exit_flag = True
except:
print('An error occurred, did you unplug the device?')
print('Stopped reading ' + port.name)
port.close()
def handleSerialDevice(path = SERIALPATH):
"""opens the serial port and starts reading it"""
port = openPort(path)
serial_ports[port.name.split("/")[-1]] = port
readSerial(port)
# Program
# ports = getSerialPorts()
# path = SERIALPATH
# def serialPool():
# """checks for new serial devices connecting, opens and read device when detected"""
# global exit_flag
# # devices = getSerialPorts()
# devices = [SERIALPATH]
# threads_pool = []
# # checking if any serial device is connected
# if len(devices) > 0:
# for device in devices:
# # creating a thread reading serial port
# threads_pool.append(threading.Thread(target = handleSerialDevice, args = (SERIAL_ROOT + device,), daemon = True ))
# connected_anchors[device] = None
# else:
# # waiting for any device to connect;
# waitSerialDevice(SERIALPATH)
# # starting threads
# for thread in threads_pool:
# thread.start()
# while not(exit_flag):
# try:
# time.sleep(0.5)
# except KeyboardInterrupt:
# print(" received, quitting program...")
# exit_flag = True
# for thread in threads_pool:
# thread.join()
# sys.exit()
# serialPool()
handleSerialDevice()
|
main_terminalinfo.py
|
import fcntl
import sys
import threading
import os
import termios
import tty
from select import select
from main_terminalGetKey import getKey
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
"""
In [24]: input_key = ""
...: clean_key = ""
...: while True:
...: with Raw(sys.stdin):
...: if not select([sys.stdin], [], [], 0.1)[0]:
...: continue
...: input_key += sys.stdin.read(1)
...: if input_key == "\033":
...: with Nonblocking(sys.stdin):
...: input_key += sys.stdin.read(20)
...: if input_key.startswith("\033[<"):
...: _ = sys.stdin.read(1000)
...: if input_key == "\033":
...: clean_key = "escape"
...: elif input_key == "\\":
...: clean_key = "\\"
...:  # reset input_key and clean_key
...: print(f"{input_key=}, {clean_key=}")
"""
class Key:
stopping = False
started = False
thread: threading.Thread
@classmethod
def start(self):
self.thread = threading.Thread(target=self._getkey)
self.started = True
self.stopping = False
self.thread.start()
@classmethod
def stop(self):
if self.started and self.thread.is_alive():
self.stopping = True
self.started = False
@classmethod
def _getkey(self):
i = 0
while not self.stopping:
i += 1
print(f"\033[13;13H{i}{self.stopping}")
key = getKey()
if key == "a":
print("\033[5;5HHe press A")
elif key == "q" or key == "\x03":
clean_quit()
class Collector:
stopping = False
started = False
thread: threading.Thread
@classmethod
def start(self):
self.thread = threading.Thread(target=self._runner)
self.started = True
self.stopping = False
self.thread.start()
@classmethod
def stop(self):
if self.started and self.thread.is_alive():
self.stopping = True
self.started = False
@classmethod
def _runner(self):
j = 0
while not self.stopping:
j += 1
print(f"\033[14;14H{j}{self.stopping}")
pass
def clean_quit(error: int = 0):
Key.stop()
Collector.stop()
raise SystemExit(error)
def clear():
print("\033[2J\033[1;1H") # Clear screen
def main():
clear()
Key.start()
Collector.start()
if __name__ == "__main__":
main()
|
stillReceiver.py
|
from .stillProcessor import StillProcessor
from .cameraAPI import RecordMode
import threading
import time
from queue import Queue
import requests
class StillReceiver:
def __init__(self, cameraManager):
self.cameraManager = cameraManager
self.image_queue = Queue()
self.runLoop = False
self.processor = StillProcessor(cameraManager, self.image_queue)
self.capturing = False
def loop(self):
print("stillReceivver: Entering loop")
while self.runLoop:
self.handle_receiver()
def start(self):
self.runLoop = True
try:
t = threading.Thread(target=self.loop)
t.start()
except:
print("Error starting stillReceiver")
self.processor.start()
def stop(self):
print("Stopping still receiver")
self.runLoop = False
self.processor.stop()
def handle_receiver(self):
if self.cameraManager.connected == False:
print("stillReceiver: Camera not connected")
time.sleep(5)
return
if self.cameraManager.wantedMode != RecordMode.STILL:
time.sleep(3)
return
if (
self.cameraManager.currentMode != RecordMode.STILL
and self.cameraManager.wantedMode == RecordMode.STILL
):
print("stillReceiver: Camera not in Still mode")
self.cameraManager.api.start_record_mode()
time.sleep(5)
return
if self.capturing == True:
print("Waiting on image capture")
time.sleep(1)
return
print("Capturing picture")
self.cameraManager.api.still_capture(self.handle_image)
self.capturing = True
def handle_image(self, result):
if result is None:
self.capturing = False
print("Image capture failed")
return
try:
photo_url = result["result"][0][0]
# Download image from Camera
photo = requests.get(photo_url).content
self.image_queue.put(photo)
except:
print("Failed to download image")
self.capturing = False
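# Illustrative usage (cameraManager is constructed elsewhere and assumed to
# expose .connected, .wantedMode, .currentMode and .api as used above):
#   receiver = StillReceiver(cameraManager)
#   receiver.start()   # spawns the receive loop and the StillProcessor
#   ...
#   receiver.stop()    # stops both loops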
|
test_base_events.py
|
"""Tests for base_events.py"""
import concurrent.futures
import errno
import math
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
from test.support import os_helper
from test.support import socket_helper
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
asyncio.set_event_loop_policy(None)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
if socket_helper.IPV6_ENABLED:
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = self.loop.create_future()
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_error(self):
executor = mock.Mock()
msg = 'executor must be ThreadPoolExecutor instance'
with self.assertRaisesRegex(TypeError, msg):
self.loop.set_default_executor(executor)
self.assertIsNone(self.loop._default_executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
with self.assertRaises(TypeError, msg="delay must not be None"):
self.loop.call_later(None, cb)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
with self.assertRaises(TypeError, msg="when cannot be None"):
self.loop.call_at(None, cb)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = loop.create_future()
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = self.loop.create_future()
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(SystemExit):
pass
async def foo(delay):
await asyncio.sleep(delay)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
with self.assertRaises(ShowStopper):
self.loop.run_until_complete(foo(0.1))
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
async def zero_error_coro():
await asyncio.sleep(0.01)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
self.assertFalse(mock_handler.called)
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
async def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
async def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_create_named_task_with_default_factory(self):
async def test():
pass
loop = asyncio.new_event_loop()
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_create_named_task_with_custom_factory(self):
def task_factory(loop, coro):
return asyncio.Task(coro, loop=loop)
async def test():
pass
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
# run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
# it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.get_running_loop().create_future()
def _assert_state(self, *expected):
if self.state not in expected:
raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
def connection_made(self, transport):
self.transport = transport
self._assert_state('INITIAL')
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
self._assert_state('CONNECTED')
self.nbytes += len(data)
def eof_received(self):
self._assert_state('CONNECTED')
self.state = 'EOF'
def connection_lost(self, exc):
self._assert_state('CONNECTED', 'EOF')
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = loop.create_future()
def _assert_state(self, expected):
if self.state != expected:
raise AssertionError(f'state: {self.state!r}, expected: {expected!r}')
def connection_made(self, transport):
self.transport = transport
self._assert_state('INITIAL')
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
self._assert_state('INITIALIZED')
self.nbytes += len(data)
def error_received(self, exc):
self._assert_state('INITIALIZED')
def connection_lost(self, exc):
self._assert_state('INITIALIZED')
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = self.loop.create_future()
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(lambda: None, '::1', 0)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
async def getaddrinfo(*args, **kw):
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
if socket_helper.IPV6_ENABLED:
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms
# ('::1', 80) to ('::1', 80, 0, 0). The last 0s are flow info,
# scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
"bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
async def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136, fallback to getaddrinfo when
# we can't recognize an address is resolved, e.g. a Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
self.assertEqual((host, port), addr)
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = self.loop.create_future()
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
async def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
TypeError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
TypeError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
self.assertEqual(transport._sock.family, socket.AF_UNIX)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@socket_helper.skip_unless_bind_unix_socket
def test_create_datagram_endpoint_existing_sock_unix(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
sock.bind(path)
sock.close()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
path, family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
@unittest.skipIf(sys.platform == 'vxworks',
"SO_BROADCAST is enabled by default on VxWorks")
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
async def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
async def stop_loop_coro(loop):
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
async def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
os_helper.unlink(os_helper.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(os_helper.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, socket_helper.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(os_helper.TESTFN, encoding="utf-8") as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
|
efs-manager.py
|
#!/usr/bin/env python
import boto3
import datetime
import kubernetes
import kubernetes.client.rest
import logging
import os
import random
import re
import requests
import string
import threading
import time
import yaml
base_stunnel_port = int(os.environ.get('BASE_STUNNEL_PORT', 20490))
efs_polling_interval = int(os.environ.get('EFS_POLLING_INTERVAL', 300))
efs_stunnel_targets = {}
provision_backoff = {}
provision_backoff_interval = int(os.environ.get('PROVISION_BACKOFF_INTERVAL', 60))
storage_classes = {}
storage_provisioner_name = os.environ.get('STORAGE_PROVISIONER_NAME', 'gnuthought.com/efs-stunnel')
worker_image = os.environ.get('WORKER_IMAGE', 'docker-registry.default.svc:5000/openshift/rhel7:latest')
root_pv_done = False
def init():
"""Initialization function before management loops."""
init_logging()
init_efs_api()
init_kube_api()
init_namespace()
def init_logging():
"""Define logger global and set default logging level.
    Default logging level is DEBUG and may be overridden with the
LOGGING_LEVEL environment variable.
"""
global logger
logging.basicConfig(
format='%(asctime)-15s %(levelname)s %(message)s',
)
logger = logging.getLogger('efs-manager')
logger.setLevel(os.environ.get('LOGGING_LEVEL', 'DEBUG'))
def init_efs_api():
"""Set efs_api global to communicate with the EFS API for this region."""
global efs_api
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
response_json = r.json()
efs_api = boto3.client('efs', region_name=response_json.get('region'))
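    # The instance-identity document is plain JSON from the EC2 metadata
    # service; only its "region" field is used here, e.g.
    # {"region": "us-east-1", ...} (other keys omitted).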
def init_kube_api():
"""Set kube_api and kube_storage_api globals to communicate with the local
kubernetes cluster."""
global kube_api, kube_storage_api
with open('/run/secrets/kubernetes.io/serviceaccount/token') as f:
serviceaccount_token = f.read()
kubeconfig = kubernetes.client.Configuration()
kubeconfig.api_key['authorization'] = serviceaccount_token
kubeconfig.api_key_prefix['authorization'] = 'Bearer'
kubeconfig.host = os.environ['KUBERNETES_PORT'].replace('tcp://', 'https://', 1)
kubeconfig.ssl_ca_cert = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'
kube_api = kubernetes.client.CoreV1Api(
kubernetes.client.ApiClient(kubeconfig)
)
kube_storage_api = kubernetes.client.StorageV1Api(
kubernetes.client.ApiClient(kubeconfig)
)
def init_namespace():
"""Set the namespace global based on the namespace in which this pod is
running.
"""
global namespace
with open('/run/secrets/kubernetes.io/serviceaccount/namespace') as f:
namespace = f.read()
def create_root_pvs_and_pvcs(conf_efs_stunnel_targets):
"""Create persistent volumes and persistent volume claims for the root
of each EFS filesystem. These are used to launch worker pods to create
and cleanup subdirectories for other volumes within the filesystem.
"""
for file_system_id, efs_stunnel_target in conf_efs_stunnel_targets.items():
pvc = None
pvc_name = "efs-stunnel-{}".format(file_system_id)
try:
pvc = kube_api.read_namespaced_persistent_volume_claim(pvc_name, namespace)
except kubernetes.client.rest.ApiException as e:
if e.status != 404:
logger.error("Error getting pvc {}:".format(pvc_name) + str(e))
if not pvc:
create_root_pv_and_pvc(
file_system_id,
efs_stunnel_target['stunnel_port']
)
    global root_pv_done
    root_pv_done = True
def create_root_pv_and_pvc(file_system_id, stunnel_port):
"""Create persistent volume and persistent volume claim for the root of a
given EFS filesystem id using the given stunnel port for access.
The storage class "efs-stunnel-system" is used for these resources to
prevent them from being managed as normal volumes.
    A random root_key label acts as a safeguard so that no other, unexpected
    persistent volume claim can bind the root persistent volume.
"""
    root_key = ''.join(random.sample(string.ascii_lowercase + string.digits, 10))
pvc = create_root_pvc(file_system_id, root_key)
create_root_pv(file_system_id, stunnel_port, pvc)
def create_root_pvc(file_system_id, root_key):
"""Create persistent volume claim for the root of the given EFS filesystem
id.
"""
logger.debug("create_root_pvc(): fs id: {} root_key {}".format(file_system_id, root_key))
return kube_api.create_namespaced_persistent_volume_claim(
namespace,
kubernetes.client.V1PersistentVolumeClaim(
metadata = kubernetes.client.V1ObjectMeta(
name = "efs-stunnel-{}".format(file_system_id),
annotations = {
"volume.beta.kubernetes.io/storage-provisioner": storage_provisioner_name
}
),
spec = kubernetes.client.V1PersistentVolumeClaimSpec(
access_modes = ['ReadWriteMany'],
resources = kubernetes.client.V1ResourceRequirements(
requests = { "storage": "1Gi" },
),
selector = kubernetes.client.V1LabelSelector(
match_labels = {
'file_system_id': file_system_id,
'root_key': root_key
}
),
storage_class_name = "efs-stunnel-system"
)
)
)
def create_root_pv(file_system_id, stunnel_port, pvc):
"""Create persistent volume for the root of the given EFS filesystem id."""
kube_api.create_persistent_volume(
kubernetes.client.V1PersistentVolume(
metadata = kubernetes.client.V1ObjectMeta(
name = "efs-stunnel-{}".format(file_system_id),
labels = pvc.spec.selector.match_labels,
annotations = {
'pv.kubernetes.io/provisioned-by': storage_provisioner_name,
'efs-stunnel.gnuthought.com/file-system-id': file_system_id
}
),
spec = kubernetes.client.V1PersistentVolumeSpec(
access_modes = ['ReadWriteMany'],
capacity = { "storage": "1Gi" },
claim_ref = kubernetes.client.V1ObjectReference(
api_version = pvc.api_version,
kind = pvc.kind,
name = pvc.metadata.name,
namespace = pvc.metadata.namespace,
resource_version = pvc.metadata.resource_version,
uid = pvc.metadata.uid
),
mount_options = [ 'port={}'.format(stunnel_port) ],
nfs = kubernetes.client.V1NFSVolumeSource(
path = '/',
server = '127.0.0.1'
),
persistent_volume_reclaim_policy = 'Retain',
storage_class_name = "efs-stunnel-system"
)
)
)
def remove_root_pvc(file_system_id):
"""Remove the root persistent volume claim for an EFS file system."""
    kube_api.delete_namespaced_persistent_volume_claim(
        "efs-stunnel-{}".format(file_system_id),
        namespace,
        body=kubernetes.client.V1DeleteOptions()
    )
def manage_stunnel_conf():
"""Main function for managing stunnel config map and global variable
efs_stunnel_targets.
"""
global efs_stunnel_targets
config_map = get_config_map()
conf_efs_stunnel_targets = get_config_map_efs_stunnel_targets(config_map)
if not root_pv_done:
create_root_pvs_and_pvcs(conf_efs_stunnel_targets)
efs_stunnel_targets = conf_efs_stunnel_targets
def get_config_map():
"""Return efs-stunnel config map if it exists."""
config_map = None
try:
config_map = kube_api.read_namespaced_config_map(
'efs-stunnel',
namespace
)
except kubernetes.client.rest.ApiException as e:
if e.status != 404:
raise Exception("Error getting efs-stunnel config map" + str(e))
return config_map
def get_config_map_efs_stunnel_targets(config_map):
"""Return efs_stunnel_targets from config map if it is defined. If the
    config map is not defined or does not have efs_stunnel_targets, then return
an empty dict.
"""
if config_map:
        stunnel_conf = yaml.safe_load(config_map.data['efs-stunnel.yaml'])
return stunnel_conf.get('efs_stunnel_targets', {})
else:
return {}
def create_config_map():
"""Create the efs-stunnel config map with the efs_stunnel_targets."""
logger.info("Creating efs-stunnel config map")
kube_api.create_namespaced_config_map(
namespace,
kubernetes.client.V1ConfigMap(
data = config_map_data(),
metadata = kubernetes.client.V1ObjectMeta(
name = 'efs-stunnel',
labels = {
'component': 'efs-stunnel'
}
)
)
)
def update_config_map():
"""Patch the efs-stunnel config map with the efs_stunnel_targets."""
logger.info("Updating efs-stunnel config map")
kube_api.patch_namespaced_config_map(
'efs-stunnel',
namespace,
{ "data": config_map_data() }
)
def config_map_data():
"""Return the data for the efs-stunnel config map containing
efs_stunnel_targets and a timestamp of last_update.
"""
return {
"efs-stunnel.yaml": yaml.safe_dump({
"efs_stunnel_targets": efs_stunnel_targets,
"last_update": datetime.datetime.utcnow().strftime('%FT%TZ')
})
}
def check_provision_backoff(pvc):
"""Check if there was a recent attempt to provision a persistent volume
for the given persistent volume claim. If so, then return a true value.
"""
    # Iterate over a copy so stale entries can be purged while scanning.
    for pvc_uid, last_attempt in list(provision_backoff.items()):
if last_attempt < time.time() - provision_backoff_interval:
del provision_backoff[pvc_uid]
ret = pvc.metadata.uid in provision_backoff
provision_backoff[pvc.metadata.uid] = time.time()
return ret
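# Illustrative backoff timeline with the default 60s interval (times are
# hypothetical):
#   t=0   first attempt for a claim -> False, attempt recorded
#   t=30  retry for the same claim  -> True (still inside the backoff window)
#   t=90  retry once more           -> False (the stale entry was purged)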
def get_file_system_id_from_pvc(pvc):
"""Get file system id from a persistent volume claim and enforce
restrictions on file system ids if set on the storage class.
"""
sc = storage_classes[pvc.spec.storage_class_name]
logger.debug("get_file_system_id_from_pvc(): {} ".format(sc))
file_system_id = pvc.spec.selector.match_labels.get('file_system_id', None)
logger.debug("get_file_system_id_from_pvc():pvc.spec.selector.match_labels.get-file_system_id: {} ".format(file_system_id))
if not file_system_id:
if 'default_file_system_id' in sc:
file_system_id = sc['default_file_system_id']
else:
return None
if 'file_system_ids' in sc:
# Restrict access to efs to specific volumes
if file_system_id == 'auto':
logger.debug("get_file_system_id_from_pvc():file_system_ids-auto {} ".format(file_system_id))
return sc['file_system_ids'][0]
elif file_system_id in sc['file_system_ids']:
logger.debug("get_file_system_id_from_pvc():file_system_ids {} ".format(file_system_id))
return file_system_id
else:
return None
if file_system_id == 'auto' and efs_stunnel_targets:
        return next(iter(efs_stunnel_targets))
else:
return file_system_id
def pvc_reject_reason(pvc):
"""Check if a persistent volume claim should be rejected and return a
string describing the reason if so.
"""
if not pvc.spec.selector:
return "Missing spec.selector", True
if not pvc.spec.selector.match_labels:
return "Missing spec.selector.match_labels", True
file_system_id = get_file_system_id_from_pvc(pvc)
if not file_system_id:
return "Missing spec.selector.match_labels.file_system_id", True
    if file_system_id not in efs_stunnel_targets:
return "Unable to find file_system_id {}".format(file_system_id), False
if not pvc.spec.selector.match_labels.get('volume_name', False):
return "Missing spec.selector.match_labels.volume_name", True
if not re.match(r'^[a-z0-9_]+$', pvc.spec.selector.match_labels['volume_name']):
return "Invalid value for pvc.spec.selector.match_labels.volume_name", True
    if pvc.spec.selector.match_labels.get('reclaim_policy', 'Delete') not in ['Delete', 'Retain']:
return "Invalid value for pvc.spec.selector.match_labels.reclaim_policy", True
return False, False
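# For reference, a claim that passes the checks above carries selector
# match_labels along these lines (the filesystem id is hypothetical):
#   file_system_id: fs-0123abcd   # or "auto" to pick the first allowed target
#   volume_name: my_volume        # lowercase letters, digits and "_" only
#   reclaim_policy: Delete        # optional; "Delete" (default) or "Retain"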
def record_pvc_reject(pvc, reject_reason):
"""Record that a persistent volume claim was rejected in the annotations
of the persistent volume claim.
"""
if not pvc.metadata.annotations:
pvc.metadata.annotations = {}
pvc.metadata.annotations['efs-stunnel.gnuthought.com/rejected'] = "true"
pvc.metadata.annotations['efs-stunnel.gnuthought.com/reject-reason'] = reject_reason
kube_api.replace_namespaced_persistent_volume_claim(
pvc.metadata.name,
pvc.metadata.namespace,
pvc
)
def reject_invalid_pvc(pvc):
"""Check if a persistent volume claim should be rejected and process the
rejection. Return True if rejected.
"""
reject_reason, record_rejection = pvc_reject_reason(pvc)
# FIXME - Create event on reject
if not reject_reason:
return
    logger.warning("Rejecting pvc {} in {}: {}".format(
pvc.metadata.name,
pvc.metadata.namespace,
reject_reason
))
if record_rejection:
record_pvc_reject(pvc, reject_reason)
return True
def start_mountpoint_worker(worker_name, file_system_id, path, command):
"""Start worker pod to manage mountpoint directories within the EFS
filesystem.
"""
logger.info("Starting worker pod {}".format(worker_name))
kube_api.create_namespaced_pod(
namespace,
kubernetes.client.V1Pod(
metadata = kubernetes.client.V1ObjectMeta(
name = worker_name,
labels = { "component": "efs-worker" }
),
spec = kubernetes.client.V1PodSpec(
containers = [ kubernetes.client.V1Container(
name = "worker",
image = worker_image,
image_pull_policy = "IfNotPresent",
command = [
"/bin/sh",
"-c",
command
],
volume_mounts = [ kubernetes.client.V1VolumeMount(
mount_path = "/efs",
name = "efs"
)]
)],
restart_policy = "OnFailure",
security_context = kubernetes.client.V1PodSecurityContext(
run_as_user = 0
),
service_account_name = "efs-worker",
volumes = [ kubernetes.client.V1Volume(
name = "efs",
persistent_volume_claim = kubernetes.client.V1PersistentVolumeClaimVolumeSource(
claim_name = "efs-stunnel-{}".format(file_system_id),
)
)]
)
)
)
def delete_worker_pod(worker_name):
"""Delete a worker pod by name."""
logger.debug("delete_worker_pod():worker_name {} namespace {} ".format(worker_name, namespace))
delete_options = kubernetes.client.V1DeleteOptions()
kube_api.delete_namespaced_pod(
name=worker_name,
namespace=namespace,
body=delete_options)
def wait_for_worker_completion(worker_name):
"""Wait for worker to complete and delete if successful. Failed workers
indicate a misconfiguration or bug and so are left for troubleshooting.
"""
logger.debug("wait_for_worker_completion(): worker_name {} ".format(worker_name))
w = kubernetes.watch.Watch()
for event in w.stream(
kube_api.list_namespaced_pod,
namespace,
field_selector = "metadata.name={}".format(worker_name)
):
pod = event['object']
if event['type'] in ['ADDED','MODIFIED']:
if pod.status.phase == 'Succeeded':
logger.info("Worker pod {} completed".format(worker_name))
delete_worker_pod(worker_name)
return True
elif pod.status.phase == 'Failed':
logger.error("Worker pod {} failed".format(worker_name))
return False
def run_mountpoint_worker(file_system_id, path, action, command):
"""Run a mountpoint worker pod and wait for it to complete."""
logger.debug("run_mountpoint_worker()")
worker_name = "efs-{}-{}{}-{}".format(
action,
file_system_id,
re.sub('[^0-9a-zA-Z]+', '-', path),
        ''.join(random.sample(string.ascii_lowercase + string.digits, 5))
)
logger.debug("run_mountpoint_worker():worker_name {} ".format(worker_name))
start_mountpoint_worker(worker_name, file_system_id, path, command)
logger.debug("wait_for_worker_completion()")
wait_for_worker_completion(worker_name)
def initialize_pv_mountpoint(file_system_id, path):
"""Launch mountpoint worker pod to create a mountpoint within an EFS
filesystem.
"""
logger.debug("initialize_pv_mountpoint()")
run_mountpoint_worker(
file_system_id,
path,
'init',
'mkdir -p /efs{0}; chmod 777 /efs{0}'.format(path)
)
def remove_pv_mountpoint(file_system_id, path):
"""Launch mountpoint worker pod to remove a mountpoint within an EFS
filesystem.
"""
run_mountpoint_worker(
file_system_id,
path,
'clean',
'rm -rf /efs{0}'.format(path)
)
def create_pv_for_pvc(pvc):
"""Handle persistent volume creation for a persistent volume claim."""
logger.debug("create_pv_for_pv()")
if check_provision_backoff(pvc) \
or reject_invalid_pvc(pvc):
return
file_system_id = get_file_system_id_from_pvc(pvc)
logger.debug("create_pv_for_pv(): file_system_id: {}".format(file_system_id))
namespace = pvc.metadata.namespace
logger.debug("create_pv_for_pv(): namespace: {}".format(namespace))
volume_name = pvc.spec.selector.match_labels['volume_name']
logger.debug("create_pv_for_pv(): volume_name: {}".format(volume_name))
path = '/{}/{}'.format(pvc.metadata.namespace, volume_name)
logger.debug("create_pv_for_pv(): path: {}".format(path))
pv_name = "efs-stunnel-{}-{}-{}-{}".format(
file_system_id,
pvc.metadata.namespace,
volume_name,
        ''.join(random.sample(string.ascii_lowercase + string.digits, 5))
    )
    logger.debug("create_pv_for_pvc(): pv_name: {}".format(pv_name))
initialize_pv_mountpoint(file_system_id, path)
kube_api.create_persistent_volume(
kubernetes.client.V1PersistentVolume(
metadata = kubernetes.client.V1ObjectMeta(
name = pv_name,
labels = pvc.spec.selector.match_labels,
annotations = {
"pv.kubernetes.io/provisioned-by": storage_provisioner_name,
"efs-stunnel.gnuthought.com/file-system-id": file_system_id
}
),
spec = kubernetes.client.V1PersistentVolumeSpec(
access_modes = pvc.spec.access_modes,
capacity = pvc.spec.resources.requests,
claim_ref = kubernetes.client.V1ObjectReference(
api_version = pvc.api_version,
kind = pvc.kind,
name = pvc.metadata.name,
namespace = pvc.metadata.namespace,
resource_version = pvc.metadata.resource_version,
uid = pvc.metadata.uid
),
mount_options = [
'port={}'.format(
efs_stunnel_targets[file_system_id]['stunnel_port']
)
],
nfs = kubernetes.client.V1NFSVolumeSource(
path = path,
server = '127.0.0.1'
),
persistent_volume_reclaim_policy = pvc.spec.selector.match_labels.get(
'reclaim_policy',
storage_classes[pvc.spec.storage_class_name]['reclaim_policy']
),
storage_class_name = pvc.spec.storage_class_name
)
)
)
logger.info("Created persistent volume {}".format(pv_name))
def pvc_is_root(pvc):
"""Return True if a persistent volume claim has a root_key."""
return pvc.spec.selector \
and pvc.spec.selector.match_labels \
and pvc.spec.selector.match_labels.get('root_key', None)
def pvc_has_been_rejected(pvc):
"""Return True if a persistent volume claim has a rejected annotation."""
annotations = pvc.metadata.annotations
return (
annotations and
annotations.get('efs-stunnel.gnuthought.com/rejected','') == 'true'
)
def manage_persistent_volume_claims():
"""Watch loop to manage persistent volume claims."""
# Wait for efs_stunnel_targets to be set
logger.debug("manage_persistent_volume_claims()")
while not efs_stunnel_targets:
time.sleep(10)
logger.info("Starting to manage efs-stunnel persistent volumes")
# FIXME: there should be a mechanism to periodically restart the watch to
# reprocess all persistent volume claims so that claims that were rejected
# because the EFS target did not exist can be picked up later if the EFS
# target is then discovered.
logger.debug("manage_persistent_volume_claims(): kubernetes watch")
w = kubernetes.watch.Watch()
for event in w.stream(
kube_api.list_persistent_volume_claim_for_all_namespaces
):
pvc = event['object']
if event['type'] in ['ADDED','MODIFIED'] \
and pvc.spec.storage_class_name in storage_classes \
and pvc.status.phase == 'Pending' \
and not pvc.spec.volume_name \
and not pvc_is_root(pvc) \
and not pvc_has_been_rejected(pvc):
create_pv_for_pvc(pvc)
def clean_persistent_volume(pv):
"""Remove mount point directory for a persistent volume."""
logger.info("Cleaning persistent volume {}".format(pv.metadata.name))
remove_pv_mountpoint(
pv.metadata.annotations['efs-stunnel.gnuthought.com/file-system-id'],
pv.spec.nfs.path
)
def delete_persistent_volume(pv):
"""Delete a persistent volume using the kubernetes api."""
logger.info("Deleting persistent volume {}".format(pv.metadata.name))
delete_options = kubernetes.client.V1DeleteOptions()
kube_api.delete_persistent_volume(name=pv.metadata.name, body=delete_options)
def manage_persistent_volumes():
"""Watch loop to manage persistent volume cleanup when released."""
w = kubernetes.watch.Watch()
for event in w.stream(
kube_api.list_persistent_volume
):
pv = event['object']
if event['type'] in ['ADDED','MODIFIED'] \
and pv.spec.storage_class_name in storage_classes \
and pv.status.phase == 'Released' \
and not pv.metadata.deletion_timestamp:
if pv.spec.persistent_volume_reclaim_policy == 'Delete':
clean_persistent_volume(pv)
delete_persistent_volume(pv)
def register_storage_class(sc):
"""Add storage class to storage_classes global valiable."""
storage_class = {
'reclaim_policy': sc.reclaim_policy
}
if sc.parameters:
if 'default_file_system_id' in sc.parameters:
storage_class['default_file_system_id'] = sc.parameters['default_file_system_id']
if 'file_system_ids' in sc.parameters:
storage_class['file_system_ids'] = sc.parameters['file_system_ids'].split(',')
storage_classes[sc.metadata.name] = storage_class
def manage_storage_classes():
"""Storage class watch loop."""
w = kubernetes.watch.Watch()
for event in w.stream(
kube_storage_api.list_storage_class
):
sc = event['object']
if event['type'] in ['ADDED','MODIFIED'] \
and sc.provisioner == storage_provisioner_name \
and not sc.metadata.deletion_timestamp:
register_storage_class(sc)
def manage_persistent_volume_claims_loop():
"""Top level loop for managing persistent volume claims."""
while True:
try:
manage_persistent_volume_claims()
except Exception as e:
logger.exception("Error in manage_persistent_volume_claims_loop: " + str(e))
time.sleep(60)
def manage_persistent_volumes_loop():
"""Top level loop for managing persistent volumes."""
while True:
try:
manage_persistent_volumes()
except Exception as e:
logger.exception("Error in manage_persistent_volumes_loop: " + str(e))
time.sleep(60)
def manage_storage_classes_loop():
"""Top level loop for managing storage classes."""
while True:
try:
manage_storage_classes()
except Exception as e:
logger.exception("Error in manage_storage_classes_loop: " + str(e))
time.sleep(60)
def manage_stunnel_loop():
"""Top level loop for managing stunnel."""
while True:
try:
manage_stunnel_conf()
except Exception as e:
logger.exception("Error in manage_stunnel_loop: " + str(e))
time.sleep(efs_polling_interval)
def main():
"""Main function."""
init()
threading.Thread(target=manage_persistent_volume_claims_loop).start()
threading.Thread(target=manage_persistent_volumes_loop).start()
threading.Thread(target=manage_storage_classes_loop).start()
manage_stunnel_loop()
if __name__ == '__main__':
main()
|
flaskProvider.py
|
from flask import Flask
from threading import Thread
# Using Flask for HTTP monitor
app = Flask("")
# Monitoring status
@app.route("/")
def home():
return "Twitter Scraper is ON"
# Run the web server on port 8080, listening on all interfaces
def run():
app.run(host="0.0.0.0", port=8080)
# Start the monitor in a background thread so the main program keeps running
def keep_alive():
t = Thread(target=run)
t.start()
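# A minimal usage sketch (the importing module and scraper entry point below
# are hypothetical, not part of this file):
#
#   from flaskProvider import keep_alive
#   keep_alive()        # start the Flask monitor on a background thread
#   run_scraper()       # the main workload then keeps the process alive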
|
test.py
|
import threading
#import threading.thread
import queue
import multiprocessing as mp
import sys
global stop_threads
import tensorflow as tf
import pandas as pd
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
from keras.models import load_model
model_final=load_model("model_alexnet_best2.h5")
import predict1
import attributes
stop_threads=False
num = input('Do you want to start? (y/n): ')
if num=='y' :
from PIL import ImageGrab
import cv2
import time
import numpy as np
def trial(q,threadName):
#x=[]
#i=1
while True:
#start=time.time()
q.put(np.array(ImageGrab.grab(bbox=(40,180,1000,840))))
if stop_threads:
break
#cv2.imshow('GRAY33',gray)
#i=i+2
#print('1')
#if cv2.waitKey(1) & 0xFF == ord('q'):
# print('q')
# cv2.destroyAllWindows()
#p=mp.Process(target=hang)
#thread.threading.terminate()
#threadName.exit()
#end=time.time()
#sec=start-end;
#print(1/sec)
#x.append(screen1);
#return x;
#def try11(n):
# print('n')
q=queue.LifoQueue()
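    # A LIFO queue means the consumer below always reads the newest captured
    # frame; the backlog is cleared after each read.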
t=threading.Thread(target=trial,name='thread1',args=(q,'thread1',),daemon=True)
t.start()
#thread.start_new_thread(trial,("Thread-1", ))
#current_time=time.time()
label=[]
percents=[]
while True:
#from multiprocessing.pool import ThreadPool
#pool = ThreadPool(processes=1)
#async_result = pool.apply_async(trial, ("Thread-1", )) # tuple of args for foo
# do some other stuff in the main process
#screen = async_result.get()
screen1=q.get()
with q.mutex:
q.queue.clear()
screen1=cv2.resize(screen1,(480,320))
#screen1=np.array(ImageGrab.grab(bbox=(40,180,1000,840)))
screen1=cv2.cvtColor(screen1, cv2.COLOR_BGR2RGB)
gray=cv2.resize(cv2.cvtColor(screen1,cv2.COLOR_BGR2GRAY),(480,320))
screen2=cv2.resize(screen1,(480,320))
cv2.imshow('window',screen2)
gray=cv2.resize(cv2.cvtColor(screen1,cv2.COLOR_BGR2GRAY),(480,320))
cv2.imshow('GRAY',gray)
cv2.imshow('GaussianBlur',cv2.resize(cv2.GaussianBlur(screen1,(45,45),10),(480,320)))
#img5=cv2.imread(r'C:\Users\Abhi\Desktop\Robotics_course_upenn\Motion_planning\Plant\plantvillage\Tomato___Late_blight\0ab1cab4-a0c9-4323-9a64-cdafa4342a9b___GHLB2 Leaf 8918.JPG')
labels=predict1.predict1(model_final,screen1)
#print(a)
data=pd.DataFrame(columns={'label','percent'})
label.append(labels)
        percent = attributes.attributes(screen1)  # img5 is only available commented out above; use the captured frame instead
percents.append(percent)
#print "loop took {} seconds",format(t)
#current_time=time.time()
#time.sleep(1)
#print('sss')
if cv2.waitKey(1) & 0xFF == ord('q'):
print('q')
cv2.destroyAllWindows()
data['label']=label
data['percent']=percents
data.to_csv(r'datasheet_very_2nd_useful.csv', index = False, header=True)
stop_threads=True
sys.exit()
break
|
CorretorMTP_FINAL.py
|
# Copyright 2017 GABRIEL JABLONSKI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt5 import QtCore, QtGui, QtWidgets
import os
import stat
import subprocess
import threading
import time
import webbrowser
run_list = {}
compiled_list = {}
original_wd = os.getcwd()
user_count = 0
user_number = 0
progress_count = 0
progress_max = 0
run_total = 0
run_count = 1
users_file_info = {}
users_compiled = {}
output_verification = -1
compiled = False
clone_buffer = []
compile_buffer = []
output_buffer = []
def rmtree(path): # alternative to shutil.rmtree()
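    # git object files are read-only, which can make a plain delete fail on
    # Windows; chmod each file to writable before removing it.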
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
filename = os.path.join(root, name)
os.chmod(filename, stat.S_IWUSR)
os.remove(filename)
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
def _clone(user):
user_path = original_wd + "\\Usuarios\\" + user[0]
os.chdir(original_wd + "\\Usuarios\\")
if os.path.exists(user_path):
rmtree(user_path)
clone_buffer.append("#Clonando repositório de %s..." % user[0])
p = subprocess.Popen(["git", "clone", "http://github.com/%s/%s" % (user[0], user[1]), user[0]],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
creationflags=0x08000000)
clone_response = p.communicate()
if 'fatal' in clone_response[1].decode('latin-1'):
clone_buffer.append("#Erro ao clonar respositório de %s. Erro: %s" %
(user[0], clone_response[1].decode('latin-1')))
else:
clone_buffer.append("-Repositório de %s clonado com sucesso." % user[0])
global user_number, user_count
user_count += 1
if user_count == user_number:
clone_buffer.append("==Clonagem finalizada.")
# def queue_compile(users, c_files_list):
# for user in users:
# _compile(user, c_files_list)
def _compile(user, c_files_list):
global progress_count, compiled
if user not in users_file_info:
users_file_info[user] = [[], []]
user_c_files = []
user_log = open(original_wd + "\\logs\\%s_log.txt" % user, "w")
user_log.write("Compilando\n" + 60 * "-" + "\n")
compile_buffer.append("--" + user + " iniciado.")
for root, dirs, files in os.walk(os.path.join(original_wd, "Usuarios", user)):
for name in files:
if name[-2:] == ".c":
user_c_files.append(name)
if name in c_files_list:
comp_process = subprocess.Popen(["g++", "-o",
os.path.join(original_wd,
"Compilados\\%s\\%s.exe" % (user, name[0:-2])),
os.path.join(root, name)], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
creationflags=0x08000000)
comp_response = comp_process.communicate()[1].decode('latin-1')
                if comp_response == "":
compile_buffer.append("#%s: %s compilado com sucesso." % (user, name))
user_log.write("#%s compilado com sucesso.\n" % name)
users_file_info[user][0].append(name)
else:
compile_buffer.append("--Erro ao compilar " + name + ". Erro: \n\n" + comp_response + "\n\n")
user_log.write("\n--Erro ao compilar " + name + ". Erro: \n===============\n" + comp_response
+ "\n===============\n\n")
progress_count += 1
user_log.write("\n")
for c_file in c_files_list:
if c_file not in user_c_files:
compile_buffer.append("#%s: %s não encontrado.\n" % (user, c_file))
user_log.write("#%s não encontrado.\n" % c_file)
time.sleep(1)
progress_count += 1
compile_buffer.append("--%s finalizado.\n" % user)
user_log.write(60 * "-" + "\n")
user_log.close()
global user_number, user_count
user_count += 1
if user_count == user_number:
compile_buffer.append("==Compilação finalizada.")
compiled = True
def _run(run_list, user_list):
global output_verification, compiled_list, run_total, run_count
compiled_list = {}
for user in user_list:
compiled_list[user] = []
if not compiled:
users_file_info[user] = [[], []]
for root, dirs, files in os.walk(os.getcwd() + "\\Compilados\\" + user):
for name in files:
if name[-4:] == ".exe":
compiled_list[user].append(name)
if name[:-4] in run_list:
run_total += 1
for user in user_list:
user_log = open(original_wd + '\\logs\\%s_log.txt' % user, 'a')
user_log.write("Rodando\n" + 60*'-' + '\n')
for name in compiled_list[user]:
if name[0:-4] in run_list:
user_log.write('#' + name + '\n')
output_buffer.append('#%s: %s' % (user, name))
time_out = 0
prog_input, prog_output = run_list[name[0:-4]][0], run_list[name[0:-4]][1]
run_process = subprocess.Popen(["%s" % os.path.join(original_wd, "Compilados", user, name)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=0x08000000)
run_process.stdin.write(bytes(prog_input, 'UTF-8'))
try:
run_response = run_process.communicate(timeout=1)[0].decode('latin-1')
except Exception:
output_buffer.append("====Tempo de execução excedido.")
user_log.write("==Tempo de execução excedido.\n")
time_out = 1
if not time_out:
user_log.write("--Entrada fornecida: '%s'\n" % prog_input)
try:
user_log.write("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
% run_response + 45 * "/" + "\n")
except Exception:
user_log.write("--Saída inesperada.\n")
user_log.write("--Saída esperada: '%s'\n" % prog_output)
output_buffer.append("--Entrada fornecida: '%s'\n" % prog_input)
output_buffer.append("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
% run_response + 45 * "/" + "\n")
output_buffer.append("--Saída esperada: '%s'\n" % prog_output)
while 1:
if output_verification == 1:
user_log.write("==Saída correta!\n\n")
run_count += 1
users_file_info[user][1].append(name)
output_verification = -1
break
elif output_verification == 0:
user_log.write("==Saída incorreta!\n\n")
run_count += 1
output_verification = -1
break
time.sleep(.5)
else:
output_buffer.append("Pressionar qualquer botão para continuar.")
while 1:
if output_verification == 1 or output_verification == 0:
output_verification = -1
run_count += 1
break
time.sleep(.5)
elif name[-4:] == '.exe':
users_file_info[user][1].append(name)
if not compiled:
for file in compiled_list[user]:
users_file_info[user][0].append(file[:-4] + '.c')
output_buffer.append("%s finalizado.\n" % user)
user_log.write(60 * "-" + "\n")
user_log.close()
output_buffer.append("Finalizado.\n")
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(380, 510)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 381, 501))
self.tabWidget.setObjectName("tabWidget")
self.cloneTab = QtWidgets.QWidget()
self.cloneTab.setObjectName("cloneTab")
self.lineUserEntry = QtWidgets.QLineEdit(self.cloneTab)
self.lineUserEntry.setGeometry(QtCore.QRect(10, 10, 111, 20))
self.lineUserEntry.setObjectName("lineUserEntry")
self.btnAddUser = QtWidgets.QPushButton(self.cloneTab)
self.btnAddUser.setGeometry(QtCore.QRect(10, 40, 111, 21))
self.btnAddUser.setObjectName("btnAddUser")
self.btnRemoveUser = QtWidgets.QPushButton(self.cloneTab)
self.btnRemoveUser.setGeometry(QtCore.QRect(260, 130, 101, 21))
self.btnRemoveUser.setObjectName("btnRemoveUser")
self.lineRepEntry = QtWidgets.QLineEdit(self.cloneTab)
self.lineRepEntry.setGeometry(QtCore.QRect(140, 10, 41, 20))
self.lineRepEntry.setObjectName("lineRepEntry")
self.lineListEntry = QtWidgets.QLineEdit(self.cloneTab)
self.lineListEntry.setGeometry(QtCore.QRect(10, 70, 111, 20))
self.lineListEntry.setObjectName("lineListEntry")
self.btnAddList = QtWidgets.QPushButton(self.cloneTab)
self.btnAddList.setGeometry(QtCore.QRect(10, 100, 111, 21))
self.btnAddList.setObjectName("btnAddList")
self.btnClone = QtWidgets.QPushButton(self.cloneTab)
self.btnClone.setGeometry(QtCore.QRect(140, 40, 221, 81))
self.btnClone.setObjectName("btnClone")
self.btnRemoveAll = QtWidgets.QPushButton(self.cloneTab)
self.btnRemoveAll.setGeometry(QtCore.QRect(260, 160, 101, 21))
self.btnRemoveAll.setObjectName("btnRemoveAll")
self.textCloneLog = QtWidgets.QTextEdit(self.cloneTab)
self.textCloneLog.setGeometry(QtCore.QRect(10, 330, 351, 121))
self.textCloneLog.setObjectName("textCloneLog")
self.textCloneLog.setReadOnly(1)
self.treeCloneUsers = QtWidgets.QTreeWidget(self.cloneTab)
self.treeCloneUsers.setGeometry(QtCore.QRect(10, 130, 241, 192))
self.treeCloneUsers.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.treeCloneUsers.setObjectName("treeCloneUsers")
self.treeCloneUsers.header().setDefaultSectionSize(138)
self.pushButton = QtWidgets.QPushButton(self.cloneTab)
self.pushButton.setGeometry(QtCore.QRect(260, 190, 101, 51))
self.pushButton.setObjectName("pushButton")
self.tabWidget.addTab(self.cloneTab, "")
self.compileTab = QtWidgets.QWidget()
self.compileTab.setObjectName("compileTab")
self.listUsers = QtWidgets.QListWidget(self.compileTab)
self.listUsers.setGeometry(QtCore.QRect(10, 30, 111, 181))
self.listUsers.setObjectName("listUsers")
self.labelUsers = QtWidgets.QLabel(self.compileTab)
self.labelUsers.setGeometry(QtCore.QRect(10, 10, 47, 13))
self.labelUsers.setObjectName("labelUsers")
self.lineFileName = QtWidgets.QLineEdit(self.compileTab)
self.lineFileName.setGeometry(QtCore.QRect(130, 160, 111, 21))
self.lineFileName.setObjectName("lineFileName")
self.btnAddFile = QtWidgets.QPushButton(self.compileTab)
self.btnAddFile.setGeometry(QtCore.QRect(130, 190, 111, 21))
self.btnAddFile.setObjectName("btnAddFile")
self.btnAddFileList = QtWidgets.QPushButton(self.compileTab)
self.btnAddFileList.setGeometry(QtCore.QRect(250, 190, 111, 21))
self.btnAddFileList.setObjectName("btnAddFileList")
self.lineEdit = QtWidgets.QLineEdit(self.compileTab)
self.lineEdit.setGeometry(QtCore.QRect(250, 160, 111, 20))
self.lineEdit.setObjectName("lineEdit")
self.btnCompile = QtWidgets.QPushButton(self.compileTab)
self.btnCompile.setGeometry(QtCore.QRect(10, 220, 351, 41))
self.btnCompile.setObjectName("btnCompile")
self.listFiles = QtWidgets.QListWidget(self.compileTab)
self.listFiles.setGeometry(QtCore.QRect(130, 30, 111, 121))
self.listFiles.setObjectName("listFiles")
self.labelFile = QtWidgets.QLabel(self.compileTab)
self.labelFile.setGeometry(QtCore.QRect(130, 10, 81, 16))
self.labelFile.setObjectName("labelFile")
self.textCompileLog = QtWidgets.QTextEdit(self.compileTab)
self.textCompileLog.setGeometry(QtCore.QRect(10, 300, 351, 131))
self.textCompileLog.setObjectName("textCompileLog")
self.textCompileLog.setReadOnly(1)
self.progressBar = QtWidgets.QProgressBar(self.compileTab)
self.progressBar.setGeometry(QtCore.QRect(10, 270, 361, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.btnRemoveFile = QtWidgets.QPushButton(self.compileTab)
self.btnRemoveFile.setGeometry(QtCore.QRect(250, 30, 111, 23))
self.btnRemoveFile.setObjectName("btnRemoveFile")
self.btnRemoveAll_2 = QtWidgets.QPushButton(self.compileTab)
self.btnRemoveAll_2.setGeometry(QtCore.QRect(250, 60, 111, 23))
self.btnRemoveAll_2.setObjectName("btnRemoveAll_2")
self.comboUser = QtWidgets.QComboBox(self.compileTab)
self.comboUser.setGeometry(QtCore.QRect(10, 440, 121, 22))
self.comboUser.setObjectName("comboUser")
self.btnVerifyLog = QtWidgets.QPushButton(self.compileTab)
self.btnVerifyLog.setGeometry(QtCore.QRect(140, 440, 221, 23))
self.btnVerifyLog.setObjectName("btnVerifyLog")
self.btnVerifyLog.setDisabled(1)
self.tabWidget.addTab(self.compileTab, "")
self.runTab = QtWidgets.QWidget()
self.runTab.setObjectName("runTab")
self.labelInput = QtWidgets.QLabel(self.runTab)
self.labelInput.setGeometry(QtCore.QRect(10, 150, 47, 13))
self.labelInput.setObjectName("labelInput")
self.labelOutput = QtWidgets.QLabel(self.runTab)
self.labelOutput.setGeometry(QtCore.QRect(140, 150, 111, 16))
self.labelOutput.setObjectName("labelOutput")
self.lineInput = QtWidgets.QLineEdit(self.runTab)
self.lineInput.setGeometry(QtCore.QRect(10, 170, 111, 20))
self.lineInput.setObjectName("lineInput")
self.lineOutput = QtWidgets.QLineEdit(self.runTab)
self.lineOutput.setGeometry(QtCore.QRect(140, 170, 111, 20))
self.lineOutput.setObjectName("lineOutput")
self.tableFiles = QtWidgets.QTreeWidget(self.runTab)
self.tableFiles.setGeometry(QtCore.QRect(10, 10, 351, 91))
self.tableFiles.setObjectName("tableFiles")
self.tableFiles.header().setDefaultSectionSize(116)
self.comboFiles = QtWidgets.QComboBox(self.runTab)
self.comboFiles.setGeometry(QtCore.QRect(10, 120, 101, 21))
self.comboFiles.setObjectName("comboFiles")
self.checkNoOutput = QtWidgets.QCheckBox(self.runTab)
self.checkNoOutput.setGeometry(QtCore.QRect(140, 120, 141, 17))
self.checkNoOutput.setObjectName("checkNoOutput")
self.btnUpdate = QtWidgets.QPushButton(self.runTab)
self.btnUpdate.setGeometry(QtCore.QRect(260, 150, 101, 41))
self.btnUpdate.setObjectName("btnUpdate")
self.textFileOutput = QtWidgets.QTextEdit(self.runTab)
self.textFileOutput.setGeometry(QtCore.QRect(10, 250, 351, 171))
self.textFileOutput.setObjectName("textFileOutput")
self.textFileOutput.setReadOnly(1)
self.btnRun = QtWidgets.QPushButton(self.runTab)
self.btnRun.setGeometry(QtCore.QRect(10, 200, 351, 41))
self.btnRun.setObjectName("btnRun")
self.btnRight = QtWidgets.QPushButton(self.runTab)
self.btnRight.setGeometry(QtCore.QRect(10, 430, 171, 31))
self.btnRight.setObjectName("btnRight")
self.btnWrong = QtWidgets.QPushButton(self.runTab)
self.btnWrong.setGeometry(QtCore.QRect(190, 430, 171, 31))
self.btnWrong.setObjectName("btnWrong")
self.tabWidget.addTab(self.runTab, "")
self.resultsTab = QtWidgets.QWidget()
self.resultsTab.setObjectName("resultsTab")
self.treeUsers = QtWidgets.QTreeWidget(self.resultsTab)
self.treeUsers.setGeometry(QtCore.QRect(10, 10, 351, 181))
self.treeUsers.setObjectName("treeUsers")
self.treeUsers.header().setCascadingSectionResizes(False)
self.treeUsers.header().setDefaultSectionSize(124)
self.comboUser_2 = QtWidgets.QComboBox(self.resultsTab)
self.comboUser_2.setGeometry(QtCore.QRect(10, 200, 111, 21))
self.comboUser_2.setObjectName("comboUser_2")
self.treeFiles = QtWidgets.QTreeWidget(self.resultsTab)
self.treeFiles.setGeometry(QtCore.QRect(10, 230, 161, 181))
self.treeFiles.setObjectName("treeFiles")
self.treeFiles.header().setDefaultSectionSize(59)
self.comboFile = QtWidgets.QComboBox(self.resultsTab)
self.comboFile.setGeometry(QtCore.QRect(130, 200, 111, 22))
self.comboFile.setObjectName("comboFile")
self.btnRectify = QtWidgets.QPushButton(self.resultsTab)
self.btnRectify.setGeometry(QtCore.QRect(250, 230, 111, 23))
self.btnRectify.setObjectName("btnRectify")
self.btnLogs = QtWidgets.QPushButton(self.resultsTab)
self.btnLogs.setGeometry(QtCore.QRect(180, 420, 181, 41))
self.btnLogs.setObjectName("btnLogs")
self.btnVerify = QtWidgets.QPushButton(self.resultsTab)
self.btnVerify.setGeometry(QtCore.QRect(250, 200, 111, 23))
self.btnVerify.setObjectName("btnVerify")
self.textOutput = QtWidgets.QTextEdit(self.resultsTab)
self.textOutput.setGeometry(QtCore.QRect(180, 260, 181, 151))
self.textOutput.setObjectName("textOutput")
self.textOutput.setReadOnly(1)
self.lineLog = QtWidgets.QLineEdit(self.resultsTab)
self.lineLog.setGeometry(QtCore.QRect(60, 430, 113, 20))
self.lineLog.setObjectName("lineLog")
self.tabWidget.addTab(self.resultsTab, "")
MainWindow.setCentralWidget(self.centralWidget)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.compileTab.setDisabled(1)
self.runTab.setDisabled(1)
self.resultsTab.setDisabled(1)
## Tab : Clonar
self.btnAddUser.clicked.connect(self.add_user)
self.btnAddList.clicked.connect(self.add_user_list)
self.btnRemoveUser.clicked.connect(self.remove_user)
self.btnRemoveAll.clicked.connect(self.remove_all)
self.pushButton.clicked.connect(self.update_compiling)
self.btnClone.clicked.connect(self.clone_users)
self.clone_timer = QtCore.QTimer()
self.clone_timer.setInterval(1000)
self.clone_timer.timeout.connect(self.update_clone_log)
## Tab : Compilar
self.btnAddFile.clicked.connect(self.add_file)
self.btnAddFileList.clicked.connect(self.add_file_list)
self.btnRemoveFile.clicked.connect(self.remove_file)
self.btnRemoveAll_2.clicked.connect(self.remove_all_files)
self.btnCompile.clicked.connect(self.compile_files)
self.btnVerifyLog.clicked.connect(self.open_log)
self.compile_timer = QtCore.QTimer()
self.compile_timer.setInterval(1000)
self.compile_timer.timeout.connect(self.update_compile_log)
## Tab : Rodar
self.btnUpdate.clicked.connect(self.update_files)
self.tableFiles.itemClicked.connect(self.new_tree_selection_run)
self.comboFiles.currentTextChanged.connect(self.new_combo_selection_run)
self.btnRun.clicked.connect(self.run_files)
self.btnRight.clicked.connect(self.right_answer)
self.btnWrong.clicked.connect(self.wrong_answer)
self.output_timer = QtCore.QTimer()
self.output_timer.setInterval(100)
self.output_timer.timeout.connect(self.update_file_output)
## Tab : Resultados
self.comboUser_2.currentTextChanged.connect(self.new_combo_selection_results)
self.btnVerify.clicked.connect(self.verify_output)
self.btnRectify.clicked.connect(self.rectify_result)
self.treeUsers.itemClicked.connect(self.new_tree_selection_results)
self.btnLogs.clicked.connect(self.save_log)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Corretor MTP v1.0"))
self.lineUserEntry.setText(_translate("MainWindow", "Usuário do Github"))
self.btnAddUser.setText(_translate("MainWindow", "Adicionar"))
self.btnRemoveUser.setText(_translate("MainWindow", "Remover"))
self.lineRepEntry.setText(_translate("MainWindow", "MTP"))
self.lineListEntry.setText(_translate("MainWindow", "lista_usuarios.txt"))
self.btnAddList.setText(_translate("MainWindow", "Adicionar lista"))
self.btnClone.setText(_translate("MainWindow", "Clonar\nrepositórios"))
self.btnRemoveAll.setText(_translate("MainWindow", "Remover todos"))
self.treeCloneUsers.headerItem().setText(0, _translate("MainWindow", "Usuário"))
self.treeCloneUsers.headerItem().setText(1, _translate("MainWindow", "Repositório"))
self.pushButton.setText(_translate("MainWindow", "Atualizar\nlista para\ncompilação"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.cloneTab), _translate("MainWindow", "Clonar"))
self.labelUsers.setText(_translate("MainWindow", "Usuários:"))
self.lineFileName.setText(_translate("MainWindow", "exemplo.c"))
self.btnAddFile.setText(_translate("MainWindow", "Adicionar programa"))
self.btnAddFileList.setText(_translate("MainWindow", "Adicionar lista"))
self.lineEdit.setText(_translate("MainWindow", "lista_programas.txt"))
self.btnCompile.setText(_translate("MainWindow", "Compilar"))
self.labelFile.setText(_translate("MainWindow", "Programas:"))
self.btnRemoveFile.setText(_translate("MainWindow", "Remover"))
self.btnRemoveAll_2.setText(_translate("MainWindow", "Remover todos"))
self.btnVerifyLog.setText(_translate("MainWindow", "Verificar log de compilação"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.compileTab), _translate("MainWindow", "Compilar"))
self.labelInput.setText(_translate("MainWindow", "Input:"))
self.labelOutput.setText(_translate("MainWindow", "Output esperado:"))
self.lineInput.setText(_translate("MainWindow", "1 2 3"))
self.lineOutput.setText(_translate("MainWindow", "Hello World!"))
self.tableFiles.headerItem().setText(0, _translate("MainWindow", "Programa"))
self.tableFiles.headerItem().setText(1, _translate("MainWindow", "Input"))
self.tableFiles.headerItem().setText(2, _translate("MainWindow", "Output"))
self.checkNoOutput.setText(_translate("MainWindow", "Desconsiderar Output"))
self.btnUpdate.setText(_translate("MainWindow", "Atualizar"))
self.btnRun.setText(_translate("MainWindow", "Rodar"))
self.btnRight.setText(_translate("MainWindow", "Saída correta"))
self.btnWrong.setText(_translate("MainWindow", "Saída incorreta"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.runTab), _translate("MainWindow", "Rodar"))
self.treeUsers.headerItem().setText(0, _translate("MainWindow", "Usuário"))
self.treeUsers.headerItem().setText(1, _translate("MainWindow", "Compilados"))
self.treeUsers.headerItem().setText(2, _translate("MainWindow", "Saída correta"))
self.treeFiles.headerItem().setText(0, _translate("MainWindow", "Programa"))
self.treeFiles.headerItem().setText(1, _translate("MainWindow", "Saída correta?"))
self.btnRectify.setText(_translate("MainWindow", "Retificar correção"))
self.btnLogs.setText(_translate("MainWindow", "Gerar relatório"))
        self.btnVerify.setText(_translate("MainWindow", "Verificar output"))
self.lineLog.setText(_translate("MainWindow", "notas_lab1.txt"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.resultsTab), _translate("MainWindow", "Resultados"))
def add_user(self):
current_users = []
for i in range(self.treeCloneUsers.topLevelItemCount()):
current_users.append(self.treeCloneUsers.topLevelItem(i).text(0))
user_text = [self.lineUserEntry.text(), self.lineRepEntry.text()]
        if user_text[0] != "":
if user_text[0] not in current_users:
user = QtWidgets.QTreeWidgetItem(user_text)
self.treeCloneUsers.addTopLevelItem(user)
self.lineUserEntry.clear()
self.lineRepEntry.setText("MTP")
def add_user_list(self):
if os.path.isfile(self.lineListEntry.text()):
user_list = []
user_list_file = open(self.lineListEntry.text(), 'r')
current_users = []
for i in range(self.treeCloneUsers.topLevelItemCount()):
current_users.append(self.treeCloneUsers.topLevelItem(i).text(0))
for line in user_list_file:
username, repname = line.split()
user_list.append([username, repname, 0, 0])
user_list_file.close()
for user in user_list:
if user[0] not in current_users:
self.treeCloneUsers.addTopLevelItem(QtWidgets.QTreeWidgetItem([user[0], user[1]]))
def remove_user(self):
user_to_remove = self.treeCloneUsers.selectedItems()
if len(user_to_remove) != 0:
self.treeCloneUsers.takeTopLevelItem(self.treeCloneUsers.indexOfTopLevelItem(user_to_remove[0]))
def remove_all(self):
self.treeCloneUsers.clear()
def update_compiling(self):
self.compileTab.setDisabled(0)
self.listUsers.clear()
self.comboUser.clear()
for i in range(self.treeCloneUsers.topLevelItemCount()):
self.listUsers.addItem(self.treeCloneUsers.topLevelItem(i).text(0))
self.comboUser.addItem(self.treeCloneUsers.topLevelItem(i).text(0))
def clone_users(self):
if self.treeCloneUsers.topLevelItemCount() != 0:
self.clone_timer.start()
users_to_clone = []
for i in range(self.treeCloneUsers.topLevelItemCount()):
users_to_clone.append([self.treeCloneUsers.topLevelItem(i).text(0),
self.treeCloneUsers.topLevelItem(i).text(1)])
global user_number, user_count
user_number = len(users_to_clone)
user_count = 0
if not os.path.exists(original_wd + "\\Usuarios"):
os.mkdir("Usuarios")
os.chdir(os.getcwd() + "\\Usuarios")
for user in users_to_clone:
thread_clone = threading.Thread(target=_clone, args=[user])
thread_clone.start()
self.btnClone.setDisabled(1)
def update_clone_log(self):
a = len(clone_buffer)
for i in range(a):
self.textCloneLog.append(clone_buffer[i])
if clone_buffer[i] == "==Clonagem finalizada.":
self.clone_timer.stop()
for i in range(a):
clone_buffer.pop(0)
def add_file(self):
self.runTab.setDisabled(0)
current_files = []
for i in range(self.listFiles.count()):
current_files.append(self.listFiles.item(i).text())
file_name = self.lineFileName.text()
        if file_name != "":
if file_name not in current_files:
self.listFiles.addItem(file_name)
self.tableFiles.addTopLevelItem(QtWidgets.QTreeWidgetItem([file_name, '-', '-']))
self.comboFiles.addItem(file_name)
if self.comboFiles.findText(" ") != -1:
self.comboFiles.removeItem(self.comboFiles.findText(" "))
self.lineFileName.clear()
if not self.tableFiles.topLevelItemCount() == 0:
self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
def add_file_list(self):
os.chdir(original_wd)
if os.path.isfile(self.lineEdit.text()):
file_list = []
file_list_file = open(self.lineEdit.text(), 'r')
self.runTab.setDisabled(0)
current_files = []
for i in range(self.listFiles.count()):
current_files.append(self.listFiles.item(i).text())
for line in file_list_file:
file_name, file_input, file_output, file_run = line.split(":")
file_list.append([file_name, file_input, file_output, file_run.split()[0]])
file_list_file.close()
for file in file_list:
if file[0] not in current_files:
self.listFiles.addItem(file[0])
self.comboFiles.addItem(file[0])
if self.comboFiles.findText(" ") != -1:
self.comboFiles.removeItem(self.comboFiles.findText(" "))
if file[3] == '1':
self.tableFiles.addTopLevelItem(QtWidgets.QTreeWidgetItem([file[0], file[1], file[2]]))
else:
self.tableFiles.addTopLevelItem(QtWidgets.QTreeWidgetItem([file[0], '-', '-']))
self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
def remove_file(self):
file_to_remove = self.listFiles.selectedItems()
if len(file_to_remove) != 0:
if self.listFiles.count() == 1:
self.comboFiles.addItem(" ")
self.comboFiles.removeItem(self.listFiles.row(file_to_remove[0]))
self.tableFiles.takeTopLevelItem(self.listFiles.row(file_to_remove[0]))
self.listFiles.takeItem(self.listFiles.row(file_to_remove[0]))
if self.tableFiles.topLevelItem(0):
self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
else:
self.lineInput.setText("1 2 3")
self.lineOutput.setText("Hello World!")
def remove_all_files(self):
self.listFiles.clear()
self.tableFiles.clear()
self.comboFiles.clear()
if self.tableFiles.topLevelItem(0):
self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
else:
self.lineInput.setText("1 2 3")
self.lineOutput.setText("Hello World!")
def compile_files(self):
if self.listUsers.count() != 0 and self.listFiles.count() != 0:
self.compile_timer.start()
if not os.path.exists(original_wd + "\\logs"):
os.mkdir(original_wd + "\\logs")
users_to_compile = []
for i in range(self.listUsers.count()):
users_to_compile.append(self.listUsers.item(i).text())
c_files = []
for i in range(self.listFiles.count()):
c_files.append(self.listFiles.item(i).text())
global progress_count, progress_max
progress_max = len(users_to_compile) * len(c_files)
progress_count = 0
global user_number, user_count
user_number = len(users_to_compile)
user_count = 0
if not os.path.exists(original_wd + "\\Compilados"):
os.mkdir("Compilados")
os.chdir(original_wd + "\\Compilados")
self.textCompileLog.append("Compilando...\n")
# self.textCompileLog.ensureCursorVisible()
# delay = 0
for user in users_to_compile:
if not os.path.exists(original_wd + "\\Compilados\\" + user):
os.mkdir(original_wd + "\\Compilados\\" + user)
# thread_compile = threading.Timer(delay, _compile, [user, c_files])
thread_compile = threading.Thread(target=_compile, args=[user, c_files])
thread_compile.start()
# delay += 10
# thread_compile_all = threading.Thread(target=queue_compile, args=[users_to_compile, c_files])
# thread_compile_all.start()
os.chdir(original_wd)
def update_compile_log(self):
a = len(compile_buffer)
for i in range(a):
self.textCompileLog.append(compile_buffer[i])
if compile_buffer[i] == "==Compilação finalizada.":
self.compile_timer.stop()
self.btnVerifyLog.setDisabled(0)
for i in range(a):
compile_buffer.pop(0)
self.progressBar.setValue(100 * progress_count // progress_max)
def open_log(self):
user_name = self.comboUser.currentText()
        log_path = original_wd + "\\logs\\%s_log.txt" % user_name  # logs are written under \logs (see _compile/_run)
if os.path.isfile(log_path):
webbrowser.open(log_path)
else:
window = QtWidgets.QMessageBox()
window.move(600, 200)
QtWidgets.QMessageBox.warning(window, 'Erro', "Log não encontrado", QtWidgets.QMessageBox.Ok)
def update_files(self):
if self.comboFiles.currentIndex() != -1:
table_item = self.tableFiles.topLevelItem(self.comboFiles.currentIndex())
if self.checkNoOutput.isChecked():
table_item.setData(1, 0, '-')
table_item.setData(2, 0, '-')
else:
table_item.setData(1, 0, self.lineInput.text())
table_item.setData(2, 0, self.lineOutput.text())
def new_tree_selection_run(self):
tree_selected = self.tableFiles.selectedItems()
if tree_selected:
self.tableFiles.clearSelection()
self.comboFiles.setCurrentIndex(self.tableFiles.indexOfTopLevelItem(tree_selected[0]))
def new_combo_selection_run(self):
if self.tableFiles.topLevelItemCount():
self.tableFiles.clearSelection()
self.tableFiles.topLevelItem(self.comboFiles.currentIndex()).setSelected(1)
self.lineInput.setText(self.tableFiles.selectedItems()[0].text(1))
self.lineOutput.setText(self.tableFiles.selectedItems()[0].text(2))
def run_files(self):
global run_list
run_list = {}
self.output_timer.start()
for i in range(self.tableFiles.topLevelItemCount()):
if self.tableFiles.topLevelItem(i).text(2) != '-':
if self.tableFiles.topLevelItem(i).text(1) == '-':
run_list[self.tableFiles.topLevelItem(i).text(0)[:-2]] \
= ["", self.tableFiles.topLevelItem(i).text(2)]
else:
run_list[self.tableFiles.topLevelItem(i).text(0)[:-2]] = [self.tableFiles.topLevelItem(i).text(1),
self.tableFiles.topLevelItem(i).text(2)]
# else:
# for user in users_file_info:
# if self.tableFiles.topLevelItem(i).text(0) in users_file_info[user][0]:
# users_file_info[user][1].append(self.tableFiles.topLevelItem(i).text(0))
user_list = []
for i in range(self.listUsers.count()):
user_list.append(self.listUsers.item(i).text())
if run_list and user_list:
thread_run = threading.Thread(target=_run, args=[run_list, user_list])
thread_run.start()
threading.Timer(2.0, self.update_file_output).start()
self.resultsTab.setDisabled(0)
def update_file_output(self):
if output_buffer:
for line in output_buffer:
self.textFileOutput.append(line)
self.btnRight.setDisabled(0)
self.btnWrong.setDisabled(0)
if line == "Finalizado.\n":
self.output_timer.stop()
self.btnRight.setDisabled(1)
self.btnWrong.setDisabled(1)
for user in users_file_info:
info = [user, str(len(users_file_info[user][0])), str(len(users_file_info[user][1]))]
self.treeUsers.addTopLevelItem(QtWidgets.QTreeWidgetItem(info))
self.comboUser_2.addItem(user)
for item in run_list:
self.comboFile.addItem(item + '.exe')
output_buffer.clear()
def right_answer(self):
global output_verification, run_total, run_count
self.textFileOutput.clear()
self.textFileOutput.append("%d/%d\n==Saída correta!" % (run_count, run_total))
output_verification = 1
self.btnRight.setDisabled(1)
self.btnWrong.setDisabled(1)
def wrong_answer(self):
global output_verification, run_total, run_count
self.textFileOutput.clear()
self.textFileOutput.append("%d/%d\n==Saída incorreta!" % (run_count, run_total))
output_verification = 0
self.btnRight.setDisabled(1)
self.btnWrong.setDisabled(1)
def new_combo_selection_results(self):
if self.treeUsers.topLevelItemCount():
self.treeUsers.clearSelection()
self.treeUsers.topLevelItem(self.comboUser_2.currentIndex()).setSelected(1)
self.treeFiles.clear()
for program in users_file_info[self.comboUser_2.currentText()][0]:
if program[:-2] in run_list:
if program[:-2] + ".exe" in users_file_info[self.comboUser_2.currentText()][1]:
self.treeFiles.addTopLevelItem(QtWidgets.QTreeWidgetItem([program[:-2] + '.exe', "Sim"]))
else:
self.treeFiles.addTopLevelItem(QtWidgets.QTreeWidgetItem([program[:-2] + '.exe', "Não"]))
def new_tree_selection_results(self):
tree_selected = self.treeUsers.selectedItems()
if tree_selected:
self.treeUsers.clearSelection()
self.comboUser_2.setCurrentIndex(self.treeUsers.indexOfTopLevelItem(tree_selected[0]))
def verify_output(self):
cur_program = self.comboFile.currentText()
cur_user = self.comboUser_2.currentText()
self.textOutput.clear()
if cur_program[0:-4] + '.c' not in users_file_info[cur_user][0]:
self.textOutput.append("%s não compilado para %s." % (cur_program, cur_user))
else:
time_out = 0
prog_input, prog_output = run_list[cur_program[0:-4]][0], run_list[cur_program[0:-4]][1]
run_process = subprocess.Popen(["%s" % os.path.join(original_wd, "Compilados", cur_user, cur_program)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=0x08000000)
run_process.stdin.write(bytes(prog_input, 'UTF-8'))
try:
run_response = run_process.communicate(timeout=1)[0].decode('latin-1')
except Exception:
self.textOutput.append("====Tempo de execução excedido.")
time_out = 1
if not time_out:
self.textOutput.append("--Entrada fornecida: '%s'\n" % prog_input)
self.textOutput.append("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
% run_response + 45 * "/" + "\n")
self.textOutput.append("--Saída esperada: '%s'\n" % prog_output)
def rectify_result(self):
cur_program = self.comboFile.currentText()
cur_user = self.comboUser_2.currentText()
tree_item = self.treeUsers.topLevelItem(self.comboUser_2.currentIndex())
if cur_program in users_file_info[cur_user][1]:
users_file_info[cur_user][1].remove(cur_program)
self.treeUsers.editItem(tree_item, 2)
tree_item.setText(2, str(int(tree_item.text(2)) - 1))
else:
if cur_program[:-4] + '.c' in users_file_info[cur_user][0]:
users_file_info[cur_user][1].append(cur_program)
tree_item.setText(2, str(int(tree_item.text(2)) + 1))
self.new_combo_selection_results()
def save_log(self):
try:
log = open(self.lineLog.text(), 'w')
for user in users_file_info:
log.write(user + (20 - len(user)) * " " + " :" + (2 - len(users_file_info[user][0]) // 10) * " " +
str(len(users_file_info[user][0])) + " : " + str(len(users_file_info[user][1])) + "\n")
log.close()
finally:
self.btnLogs.setText("Relatório gerado com sucesso")
self.btnLogs.setDisabled(1)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
connhandler.py
|
import platform
import requests
import json
import time
from pythonapm import constants
from pythonapm.agentfactory import get_agent
from pythonapm.constants import api_connect, api_data
from pythonapm.logger import agentlogger
from pythonapm.collector.reqhandler import send_req
from pythonapm.collector.reshandler import handle_connect_response
from pythonapm.collector.datahandler import process_collected_data
task_spawned = False
conn_payload = None
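# init_connection() spawns a single daemon thread: it retries the connect
# handshake until it succeeds, then ships collected metrics roughly once a minute.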
def init_connection():
global task_spawned
try:
if task_spawned is True:
return
import threading
t = threading.Thread(target=background_task, args=(), kwargs={})
        t.daemon = True
t.start()
task_spawned = True
except Exception as exc:
agentlogger.exception('Error while spawning thread')
def background_task():
conn_success = False
while(get_agent().insinfo.get_status() != constants.shutdown):
try:
if conn_success is False:
conn_success = send_connect()
else:
process_collected_data()
except Exception:
agentlogger.exception('pythonapm task error')
finally:
get_agent().get_metric_store().cleanup()
time.sleep(60)
def send_connect():
payload = getconn_payload() if conn_payload is None else conn_payload
res_data = send_req(api_connect, payload)
return handle_connect_response(res_data)
def getconn_payload():
global conn_payload
config = get_agent().get_config()
conn_payload = {
"agent_info" : {
"agent_version": '1.0.3',
"host_type": platform.system(),
"hostname": platform.node()
}, "environment" : {
"os_version": platform.release(),
"machine_name": platform.node(),
'AgentInstallPath': config.get_installed_dir(),
"python_version": platform.python_version(),
"osarch": platform.machine(),
"os": platform.system(),
}
}
return conn_payload
|
main_qt.py
|
# QtServer
from websocket_server import WebsocketServer
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore
import threading
import datetime as dt
import json
app = pg.QtGui.QApplication([])
peak = 0
y_hr = []
x_time = []
pg.setConfigOptions(antialias=True)
view = pg.GraphicsLayoutWidget(title="HR2PC QTRewrite")
p = view.addPlot(title='<h2 style="color:white;">Heart Rate Monitor</h2>')
curve = p.plot()
view.show()
app.aboutToQuit.connect(exit)
def update():
curve.setData(y_hr)
if len(x_time) != 0:
curve.setPos(x_time[-1], 0)
title = '<h3 style="color:white;">Current: {0} | Average: {1} | Peak: {2}</h3>'
p.setTitle(title.format(y_hr[-1], round(sum(y_hr)/len(y_hr)), max(y_hr)))
app.processEvents()
def on_join(client, server):
print("Client @ " + client["address"][0] + " connected.")
def on_leave(client, server):
    global x_time, y_hr
    if client.get("type") == "watch":
        # Iterate under a different name so the disconnected client is not shadowed.
        for display_client in server.clients:
            if "type" in display_client and display_client["type"] == "display":
                server.send_message(display_client, json.dumps({"type": "data", "hr": 0}))
        # Without the global declaration these assignments would only create unused locals.
        x_time = []
        y_hr = []
    print("Client @ " + client["address"][0] + " disconnected.")
def on_message(client, server, message):
send_res = True
try:
message = json.loads(message)
    except json.JSONDecodeError:
        print("Message from " + client["address"][0] + " could not be decoded.")
        return
res = {}
if "type" not in message:
res = {"type": "error", "message": "NO_MESSAGE_SPECIFIED"}
elif message["type"] == "handshake":
if "role" not in message:
res = {"type": "error", "message": "NO_ROLE_SPECIFIED"}
elif message["role"] not in ["watch", "display"]:
res = {"type": "error", "message": "INVALID_CLIENT_TYPE"}
else:
client["type"] = message["role"]
print("Client @ " + client["address"][0] + " registered as type '" + client["type"] + "'")
res = {"type": "success"}
elif message["type"] == "data":
if "type" not in client:
res = {"type": "error", "message": "UNREGISTERED_CLIENT"}
elif client["type"] != "watch":
res = {"type": "error", "message": "UNINTENDED_OPERATION"}
elif "hr" not in message:
res = {"type": "error", "message": "NO_DATA"}
else:
send_res = False
hr = message["hr"]
if hr < 0:
hr = 0
if hr != 0:
y_hr.append(int(hr))
if len(x_time) == 0:
x_time.append(1)
else:
x_time.append(x_time[-1]+1)
for client in server.clients:
if "type" in client and client["type"] == "display":
server.send_message(client, json.dumps({"type": "data", "hr": hr}))
else:
res = {"type": "error", "message": "INVALID_MESSAGE_TYPE"}
if send_res is True:
if res["type"] == "error":
print("Client triggered an error: " + res["message"])
print("With message: " + json.dumps(message))
server.send_message(client, json.dumps(res))
else:
send_res = True
def run_server():
server = WebsocketServer(9288, host="0.0.0.0")
server.set_fn_new_client(on_join)
server.set_fn_client_left(on_leave)
server.set_fn_message_received(on_message)
print("Bridge server ready.")
server.run_forever()
if __name__ == '__main__':
t = threading.Thread(target=run_server, daemon=True)
t.start()
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(20)
app.exec_()
|
runner.py
|
from contextlib import suppress
import signal
import asyncio
import os
from molotov.api import get_fixture
from molotov.listeners import EventSender
from molotov.stats import get_statsd_client
from molotov.sharedcounter import SharedCounters
from molotov.util import cancellable_sleep, stop, is_stopped, set_timer
from molotov.util import multiprocessing
from molotov.worker import Worker
class Runner(object):
"""Manages processes & workers and grabs results.
"""
def __init__(self, args, loop=None):
self.args = args
self.console = self.args.shared_console
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
        # the statsd client gets initialized after we fork
# processes in case -p was used
self.statsd = None
self._tasks = []
self._procs = []
self._results = SharedCounters(
"WORKER",
"REACHED",
"RATIO",
"OK",
"FAILED",
"MINUTE_OK",
"MINUTE_FAILED",
"MAX_WORKERS",
"SETUP_FAILED",
"SESSION_SETUP_FAILED",
)
self.eventer = EventSender(self.console)
def _set_statsd(self):
if self.args.statsd:
self.statsd = get_statsd_client(self.args.statsd_address)
else:
self.statsd = None
def run_coro(self, coro):
if not self.loop.is_running():
raise Exception("Loop is not running")
future = asyncio.run_coroutine_threadsafe(coro, self.loop)
return future.result()
def gather(self, *futures):
return asyncio.gather(*futures, loop=self.loop, return_exceptions=True)
def ensure_future(self, coro):
return asyncio.ensure_future(coro, loop=self.loop)
def __call__(self):
global_setup = get_fixture("global_setup")
if global_setup is not None:
try:
global_setup(self.args)
except Exception as e:
self.console.print("The global_setup() fixture failed")
self.console.print_error(e)
raise
try:
return self._launch_processes()
finally:
global_teardown = get_fixture("global_teardown")
if global_teardown is not None:
try:
global_teardown()
except Exception as e:
# we can't stop the teardown process
self.console.print_error(e)
def _launch_processes(self):
args = self.args
signal.signal(signal.SIGINT, self._shutdown)
signal.signal(signal.SIGTERM, self._shutdown)
args.original_pid = os.getpid()
if args.processes > 1:
if not args.quiet:
self.console.print("Forking %d processes" % args.processes)
jobs = []
for i in range(args.processes):
p = multiprocessing.Process(target=self._process)
jobs.append(p)
p.start()
for job in jobs:
self._procs.append(job)
async def run(quiet, console):
while len(self._procs) > 0:
if not quiet:
console.print(self.display_results(), end="\r")
for job in jobs:
if job.exitcode is not None and job in self._procs:
self._procs.remove(job)
await cancellable_sleep(args.console_update)
await self.console.stop()
await self.eventer.stop()
tasks = [
self.ensure_future(self.console.display()),
self.ensure_future(self._send_workers_event(1)),
self.ensure_future(run(args.quiet, self.console)),
]
self.loop.run_until_complete(self.gather(*tasks))
else:
self._process()
return self._results
def _shutdown(self, signal, frame):
stop()
self._kill_tasks()
# send sigterms
for proc in self._procs:
proc.terminate()
def _runner(self):
args = self.args
def _prepare():
tasks = []
delay = 0
if args.ramp_up > 0.0:
step = args.ramp_up / args.workers
else:
step = 0.0
for i in range(self.args.workers):
worker = Worker(
i,
self._results,
self.console,
self.args,
self.statsd,
delay,
self.loop,
)
f = self.ensure_future(worker.run())
tasks.append(f)
delay += step
return tasks
if self.args.quiet:
return _prepare()
else:
msg = "Preparing {} worker{}"
msg = msg.format(args.workers, "s" if args.workers > 1 else "")
return self.console.print_block(msg, _prepare)
def _process(self):
set_timer()
# coroutine that will kill everything when duration is up
if self.args.duration and self.args.force_shutdown:
async def _duration_killer():
cancelled = object()
res = await cancellable_sleep(self.args.duration, result=cancelled)
if res is cancelled or (res and not res.canceled()):
self._shutdown(None, None)
await asyncio.sleep(0)
_duration_killer = self.ensure_future(_duration_killer())
else:
_duration_killer = None
if self.args.processes > 1:
signal.signal(signal.SIGINT, self._shutdown)
signal.signal(signal.SIGTERM, self._shutdown)
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if self.args.debug:
self.console.print("**** RUNNING IN DEBUG MODE == SLOW ****")
self.loop.set_debug(True)
self._set_statsd()
if self.statsd is not None:
self._tasks.append(self.ensure_future(self.statsd.connect()))
if self.args.original_pid == os.getpid():
self._tasks.append(self.ensure_future(self._send_workers_event(1)))
if not self.args.quiet:
fut = self._display_results(self.args.console_update)
update = self.ensure_future(fut)
display = self.ensure_future(self.console.display())
display = self.gather(update, display)
self._tasks.append(display)
workers = self.gather(*self._runner())
def _stop(cb):
if _duration_killer is not None:
if not _duration_killer.done():
_duration_killer.cancel()
stop()
workers.add_done_callback(_stop)
self._tasks.append(workers)
try:
self.loop.run_until_complete(self.gather(*self._tasks))
finally:
if self.statsd is not None:
self.loop.run_until_complete(self.ensure_future(self.statsd.close()))
self._kill_tasks()
self.loop.close()
def _kill_tasks(self):
cancellable_sleep.cancel_all()
for task in reversed(self._tasks):
with suppress(asyncio.CancelledError):
task.cancel()
for task in self._tasks:
del task
self._tasks[:] = []
def display_results(self):
ok, fail = self._results["OK"].value, self._results["FAILED"].value
workers = self._results["WORKER"].value
pat = "SUCCESSES: %s | FAILURES: %s | WORKERS: %s"
return pat % (ok, fail, workers)
async def _display_results(self, update_interval):
while not is_stopped():
self.console.print(self.display_results(), end="\r")
await cancellable_sleep(update_interval)
await self.console.stop()
async def _send_workers_event(self, update_interval):
while not self.eventer.stopped() and not is_stopped():
workers = self._results["WORKER"].value
await self.eventer.send_event("current_workers", workers=workers)
await cancellable_sleep(update_interval)
|
sess.py
|
"""M2Crypto.SSL.Session client demo: This program requests a URL from
a HTTPS server, saves the negotiated SSL session id, parses the HTML
returned by the server, then requests each HREF in a separate thread
using the saved SSL session id.
Copyright (c) 1999-2003 Ng Pheng Siong. All rights reserved."""
from M2Crypto import Err, Rand, SSL, X509, threading
m2_threading = threading; del threading
import formatter, getopt, htmllib, sys
from threading import Thread
from socket import gethostname
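# The first connection negotiates an SSL session; each spawned thread then
# resumes that session via set_session(), skipping the full handshake.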
def handler(sslctx, host, port, href, recurs=0, sslsess=None):
s = SSL.Connection(sslctx)
if sslsess:
s.set_session(sslsess)
s.connect((host, port))
else:
s.connect((host, port))
sslsess = s.get_session()
#print sslsess.as_text()
if recurs:
p = htmllib.HTMLParser(formatter.NullFormatter())
f = s.makefile("rw")
f.write(href)
f.flush()
while 1:
data = f.read()
if not data:
break
if recurs:
p.feed(data)
if recurs:
p.close()
f.close()
if recurs:
for a in p.anchorlist:
req = 'GET %s HTTP/1.0\r\n\r\n' % a
thr = Thread(target=handler,
args=(sslctx, host, port, req, recurs-1, sslsess))
print "Thread =", thr.getName()
thr.start()
if __name__ == '__main__':
m2_threading.init()
Rand.load_file('../randpool.dat', -1)
host = '127.0.0.1'
port = 9443
req = '/'
optlist, optarg = getopt.getopt(sys.argv[1:], 'h:p:r:')
for opt in optlist:
if '-h' in opt:
host = opt[1]
elif '-p' in opt:
port = int(opt[1])
elif '-r' in opt:
req = opt[1]
ctx = SSL.Context('sslv3')
ctx.load_cert('client.pem')
ctx.load_verify_info('ca.pem')
ctx.load_client_ca('ca.pem')
ctx.set_verify(SSL.verify_none, 10)
req = 'GET %s HTTP/1.0\r\n\r\n' % req
start = Thread(target=handler, args=(ctx, host, port, req, 1))
print "Thread =", start.getName()
start.start()
start.join()
m2_threading.cleanup()
Rand.save_file('../randpool.dat')
|
test_manual_activities.py
|
# -*- mode:python ; fill-column:120 -*-
import time
import unittest
import os
from threading import Thread
from botoflow import (WorkflowDefinition, execute, return_,
ThreadedActivityExecutor, WorkflowWorker, ActivityWorker,
workflow_starter)
from botoflow.manual_activity_completion_client import ManualActivityCompletionClient
from utils import SWFMixIn
from various_activities import BunchOfActivities, ManualActivities
class TestManualActivities(SWFMixIn, unittest.TestCase):
def test_one_manual_activity(self):
swf_client = self.client
class OneManualActivityWorkflow(WorkflowDefinition):
def __init__(self, workflow_execution):
super(OneManualActivityWorkflow, self).__init__(workflow_execution)
@execute(version='1.1', execution_start_to_close_timeout=60)
def execute(self, template):
result = yield ManualActivities.perform_task(template=template)
return_(result)
wf_worker = WorkflowWorker(
self.session, self.region, self.domain, self.task_list, OneManualActivityWorkflow)
act_executor = ThreadedActivityExecutor(ActivityWorker(
self.session, self.region, self.domain, self.task_list, ManualActivities()))
with workflow_starter(self.session, self.region, self.domain, self.task_list):
instance = OneManualActivityWorkflow.execute(template='instructions.tmpl')
self.workflow_execution = instance.workflow_execution
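        # The manual activity presumably writes its task token to task_token.txt;
        # this helper, run on a separate thread, reads that token and completes
        # the activity out-of-band via ManualActivityCompletionClient.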
def complete_this_activity():
activities_client = ManualActivityCompletionClient(swf_client)
with open('task_token.txt', 'r') as shared_file:
task_token = shared_file.read()
os.remove('task_token.txt')
activities_client.complete('Manual Activity Done', task_token)
wf_worker.run_once()
act_executor.start(1, 4)
time.sleep(5)
activity_finisher = Thread(target=complete_this_activity)
activity_finisher.start()
activity_finisher.join()
act_executor.stop()
wf_worker.run_once()
act_executor.join()
wf_worker.run_once()
time.sleep(1)
hist = self.get_workflow_execution_history()
self.assertEqual(len(hist), 11)
self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
self.assertEqual(self.serializer.loads(
hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 'Manual Activity Done')
def test_one_manual_one_automatic_activity(self):
swf_client = self.client
class OneManualOneAutomaticActivityWorkflow(WorkflowDefinition):
def __init__(self, workflow_execution):
super(OneManualOneAutomaticActivityWorkflow, self).__init__(workflow_execution)
@execute(version='1.1', execution_start_to_close_timeout=60)
def execute(self, template):
(x, y) = yield ManualActivities.perform_task(template=template)
arg_sum = yield BunchOfActivities.sum(x, y)
return_(arg_sum)
wf_worker = WorkflowWorker(
self.session, self.region, self.domain, self.task_list, OneManualOneAutomaticActivityWorkflow)
act_worker = ActivityWorker(
self.session, self.region, self.domain, self.task_list,
BunchOfActivities(), ManualActivities())
with workflow_starter(self.session, self.region, self.domain, self.task_list):
instance = OneManualOneAutomaticActivityWorkflow.execute(template='instructions.tmpl')
self.workflow_execution = instance.workflow_execution
def complete_this_activity():
activities_client = ManualActivityCompletionClient(swf_client)
with open('task_token.txt', 'r') as shared_file:
task_token = shared_file.read()
os.remove('task_token.txt')
activities_client.complete((3,4), task_token)
wf_worker.run_once()
act_worker.run_once()
time.sleep(5)
activity_finisher = Thread(target=complete_this_activity)
activity_finisher.start()
activity_finisher.join()
wf_worker.run_once()
act_worker.run_once()
wf_worker.run_once()
time.sleep(1)
hist = self.get_workflow_execution_history()
self.assertEqual(len(hist), 17)
self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
self.assertEqual(self.serializer.loads(
hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 7)
if __name__ == '__main__':
unittest.main()
|
test_helm.py
|
import unittest
import os
import sys
from unfurl.yamlmanifest import YamlManifest
from unfurl.job import Runner, JobOptions
from six.moves import urllib
manifest = """
apiVersion: unfurl/v1alpha1
kind: Manifest
spec:
service_template:
imports:
- repository: unfurl
file: configurators/helm-template.yaml
topology_template:
node_templates:
stable_repo:
type: unfurl.nodes.HelmRepository
properties:
name: stable
url: http://localhost:8010/fixtures/helmrepo/
k8sNamespace:
type: unfurl.nodes.K8sNamespace
# these unittests don't define a k8sCluster so we need to comment this out
# requirements:
# - host: k8sCluster
properties:
name: unfurl-helm-unittest
mysql_release:
type: unfurl.nodes.HelmRelease
requirements:
- repository:
node: stable_repo
- host:
node: k8sNamespace
properties:
chart: stable/mysql
release_name: mysql-test
chart_values:
args: []
"""
import threading
import os.path
from functools import partial
# http://localhost:8000/fixtures/helmrepo
@unittest.skipIf("helm" in os.getenv("UNFURL_TEST_SKIP", ""), "UNFURL_TEST_SKIP set")
class HelmTest(unittest.TestCase):
def setUp(self):
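        # Serve the tests directory over HTTP so the stable_repo URL in the
        # manifest (http://localhost:8010/fixtures/helmrepo/) resolves locally.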
server_address = ("", 8010)
directory = os.path.dirname(__file__)
try:
if sys.version_info[0] >= 3:
from http.server import HTTPServer, SimpleHTTPRequestHandler
handler = partial(SimpleHTTPRequestHandler, directory=directory)
self.httpd = HTTPServer(server_address, handler)
else: # for python 2.7
from SimpleHTTPServer import SimpleHTTPRequestHandler
import SocketServer
import urllib
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
def translate_path(self, path):
path = os.path.normpath(urllib.unquote(path))
words = path.split("/")
words = filter(None, words)
path = directory
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
self.httpd = SocketServer.TCPServer(
server_address, RootedHTTPRequestHandler
)
except: # address might still be in use
self.httpd = None
return
t = threading.Thread(name="http_thread", target=self.httpd.serve_forever)
t.daemon = True
t.start()
def tearDown(self):
if self.httpd:
self.httpd.socket.close()
def test_deploy(self):
# make sure this works
f = urllib.request.urlopen("http://localhost:8010/fixtures/helmrepo/index.yaml")
f.close()
runner = Runner(YamlManifest(manifest))
run1 = runner.run(
JobOptions(planOnly=True, verbose=3, startTime=1)
)
mysql_release = runner.manifest.rootResource.findResource("mysql_release")
query = ".::.requirements::[.name=host]::.target::name"
res = mysql_release.query(query)
assert res == 'unfurl-helm-unittest'
runner = Runner(YamlManifest(manifest))
run1 = runner.run(
JobOptions(dryrun=False, verbose=3, startTime=1)
)
assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()
summary = run1.jsonSummary()
# runner.manifest.statusSummary()
# print(summary)
self.assertEqual(
summary["job"],
{
"id": "A01110000000",
"status": "ok",
"total": 4,
"ok": 4,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 4,
},
)
assert all(task["targetStatus"] == "ok" for task in summary["tasks"]), summary[
"tasks"
]
# runner.manifest.dump()
def test_undeploy(self):
runner = Runner(YamlManifest(manifest))
# print('load'); runner.manifest.statusSummary()
run = runner.run(JobOptions(workflow="check", startTime=2))
summary = run.jsonSummary()
assert not run.unexpectedAbort, run.unexpectedAbort.getStackTrace()
# print('check'); runner.manifest.statusSummary()
run2 = runner.run(
JobOptions(workflow="undeploy", startTime=3, destroyunmanaged=True)
)
assert not run2.unexpectedAbort, run2.unexpectedAbort.getStackTrace()
summary = run2.jsonSummary()
# print('undeploy'); runner.manifest.statusSummary()
# note! if tests fail may need to run:
# helm uninstall mysql-test -n unfurl-helm-unittest
# and kubectl delete namespace unfurl-helm-unittest
        # note: this test relies on stable_repo being placed in the helm cache by test_deploy()
# comment out the repository requirement to run this test standalone
assert all(
task["targetStatus"] == "absent" for task in summary["tasks"]
), summary["tasks"]
self.assertEqual(
summary["job"],
{
"id": "A01130000000",
"status": "ok",
"total": 3,
"ok": 3,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 3,
},
)
|
t_est_gui.py
|
"""
Created on 18th April, 2021
"""
import subprocess
import threading
from time import sleep
import pytest
import pyautogui
WIDTH = 800
HEIGHT = 600
class show:
def __init__(self):
self.subP = ()
self.info = ''
self.subP = subprocess.Popen(
'python local_t_est.py',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
def get_status(self):
s_thread = threading.Thread(target=self._get_status)
s_thread.daemon = True
s_thread.start()
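    # _get_status runs on a daemon thread because readline() blocks until the
    # QML app prints its "window loaded" marker on stderr.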
def _get_status(self):
a = self.subP.stderr.readline()
self.info = str(a.strip(), 'utf-8')
def test_auto_gui():
ss = show()
ss.get_status()
sleep(2)
# wait till UI is showing
while ss.info != 'qml: window loaded':
print('UI not ready. Sleeping for 1 second')
sleep(1)
print('UI is ready')
    # UI is ready, let's continue
s_width, s_height = pyautogui.size()
x = (s_width - WIDTH) / 2
# This y calculation successfully
# takes us to the bottom of the title bar
y = (s_height - HEIGHT) / 2
x_mov = 78 + x # center of the button
y_mov = 20 + y # center of the button
pyautogui.moveTo(x_mov, y_mov)
# click the play button
pyautogui.click()
# Pixel Match
pixel_match = False
while not pixel_match:
print('Pixel not ready sleep 7 seconds and repeat')
sleep(7)
ux = int(250+x)
uy = int(250+y)
pixel_match = pyautogui.pixelMatchesColor(ux, uy, (0, 0, 0))
print('Pixel Matched successfully')
# close out
x_end = x + WIDTH - 25
y_end = y - 20
pyautogui.moveTo(x_end, y_end)
sleep(5)
# close
pyautogui.click()
print('closed')
assert pixel_match
|
pytest_pre_encrypted_ota.py
|
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Unlicense OR CC0-1.0
import http.server
import multiprocessing
import os
import socket
import ssl
from typing import Callable
import pexpect
import pytest
from pytest_embedded import Dut
from RangeHTTPServer import RangeRequestHandler
server_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'server_certs/ca_cert.pem')
key_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'server_certs/server_key.pem')
enc_bin_name = 'pre_encrypted_ota_secure.bin'
def get_my_ip() -> str:
    # "Connecting" a UDP socket to a public address lets getsockname() report
    # the local interface IP; no packets are actually sent.
    s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s1.connect(('8.8.8.8', 80))
my_ip = ''
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip: str, port: int) -> bool:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
return server_status == 0
def https_request_handler() -> Callable[...,http.server.BaseHTTPRequestHandler]:
"""
    Returns a request handler class that handles broken pipe exceptions
"""
class RequestHandler(RangeRequestHandler):
def finish(self) -> None:
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self) -> None:
try:
RangeRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir: str, server_ip: str, server_port: int) -> None:
os.chdir(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@pytest.mark.esp32
@pytest.mark.esp32c3
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.ethernet_ota
def test_examples_protocol_pre_encrypted_ota_example(dut: Dut) -> None:
server_port = 8001
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut.expect(r' (sta|eth) ip: ([^,]+),', timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except pexpect.exceptions.TIMEOUT:
thread1.terminate()
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut.expect('Starting Pre Encrypted OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + enc_bin_name))
dut.write('https://' + host_ip + ':' + str(server_port) + '/' + enc_bin_name)
dut.expect('Magic Verified', timeout=30)
dut.expect('Reading RSA private key', timeout=30)
dut.expect('upgrade successful. Rebooting', timeout=30)
thread1.terminate()
|
pane.py
|
from datetime import datetime
import json
import docker
import urwid
import subprocess
import os
import tempfile
import sys
import time
import ast
import socket
import threading
import logging
import multiprocessing
from contextlib import closing
from twisted.internet import threads, reactor
from console.app import app
from console.ui.containers.inspect import ContainerInspector
from console.widgets.table import Table
from console.highlights import highlighter
from console.widgets.pane import Pane
from console.widgets.dialogs import Prompt, MessageListBox, TableDialog
from console.utils import catch_docker_errors
from console.state import ContainerMonitor, ImageMonitor
def split_repo_name(name):
    # Scan from the end of the name for a ':' tag separator; a '/' found first
    # means the name has a path component but no explicit tag.
    for idx in range(len(name)):
        c = name[-idx]
        if c == ':':
            return name[:-idx], name[-idx + 1:]
        elif c == '/':
            return name, ''
    return name, None
def clean_name(name):
    # Strip the "u" prefix and quotes left behind by a Python 2 unicode repr
    # (note: this also removes any literal "u" characters in the name).
    name = name.replace("u","")
    name = name.replace("'","")
    return name
class AlwaysFocusedEdit(urwid.Edit):
def render(self, size, focus=False):
return super(AlwaysFocusedEdit, self).render(size, focus=True)
class ContainerPane(Pane):
def __init__(self):
self.monitored = ContainerMonitor(docker.Client('unix://var/run/docker.sock', '1.18'))
self.monitored.get_containers()
self.container_data = []
self.containers = {}
self.edit = AlwaysFocusedEdit("filter: ", multiline=False)
self.listing = self.init_listing()
self.filter = ""
self.commands = ""
self.marked_containers = {}
self.in_inspect = False
self.in_diff = False
self.in_top = False
self.size = ()
Pane.__init__(self, urwid.Frame(
self.listing,
self.edit,
))
self.original_widget.focus_position = 'body'
urwid.connect_signal(self.monitored, 'container-list', self.set_containers)
self.counter = 1
def init_listing(self):
schema = (
{'name': 'Id'},
{'name': 'Image'},
{'name': 'Command'},
{'name': 'Status'},
{'name': 'Names'},
)
return Table(schema, header=True)
def make_container_row(self, container):
if "seconds" in container['status']:
container['status'] = "Up 0 minutes"
row = self.listing.create_row({
'Id': container['id'][:12],
'Image': container['image'],
'Command': container['command'],
'Status': container['status'],
'Names': container['names'],
})
row.image = container['image']
row.container = container['id']
row.name = container['names']
row.status = container['status']
self.containers[row.container] = row
return row
def set_containers(self, containers, force=False):
# save the current position
_, current_focus = self.listing.get_focus()
if containers == self.container_data and not force:
return
self.listing.clear()
self.old_containers = self.containers
self.containers = {}
running = [c for c in containers if 'Exited' not in c['status']]
stopped = [c for c in containers if 'Exited' in c['status']]
filter = self.filter.lower()
def remove_highlight(row):
highlighter.remove(row)
for container in running:
in_names = any(filter in name.lower() for name in container['names'])
in_id = filter in container['id'].lower()
in_status = filter in container['status'].lower()
in_image = filter in container['image'].lower()
row = self.make_container_row(container)
if any((in_names, in_id, in_status, in_image)):
self.listing.walker.append(row)
if self.old_containers and row.container not in self.old_containers:
highlighter.apply(row, 'created', 'created')
reactor.callLater(1, highlighter.remove, row)
for container in stopped:
in_names = any(filter in name.lower() for name in container['names'])
in_id = filter in container['id'].lower()
in_status = filter in container['status'].lower()
in_image = filter in container['image'].lower()
row = self.make_container_row(container)
if any((in_names, in_id, in_status, in_image)):
self.listing.walker.append(row)
if self.old_containers and row.container not in self.old_containers:
highlighter.apply(row, 'created', 'created')
reactor.callLater(1, highlighter.remove, row)
self.container_data = containers
self.listing.set_focus(current_focus)
self.listing.fix_focus()
app.draw_screen()
def thread(self):
return True
def keypress(self, size, event):
self.size = size
self.since_time = time.time()
if event == 'close-dialog':
if self.in_inspect:
self.in_inspect = False
if self.in_diff:
self.in_diff = False
if self.in_top:
self.in_top = False
if event == 'scroll-close':
event = 'close-dialog'
if self.dialog:
return super(ContainerPane, self).keypress(size, event)
if self.listing.keypress(size, event):
if self.handle_event(event):
if not self.dialog and self.edit.keypress((size[0], ), event):
return super(ContainerPane, self).keypress(size, event)
else:
self.filter = self.edit.edit_text
self.set_containers(self.container_data, force=True)
self.on_unmark()
if event not in ['next-container', 'prev-container', 'set-mark', 'unmark-containers']:
thread = threading.Thread(name='thread', target=self.thread)
listener = threading.Thread(name='listener', target=self.listener)
listener.setDaemon(True)
listener.start()
thread.start()
def handle_event(self, event):
if event == 'next-container':
self.on_next()
if self.in_inspect or self.in_diff or self.in_top:
self.keypress(self.size, 'scroll-close')
if self.in_inspect:
self.on_inspect()
if self.in_diff:
self.on_diff()
if self.in_top:
self.on_top()
elif event== 'prev-container':
self.on_prev()
if self.in_inspect or self.in_diff or self.in_top:
self.keypress(self.size, 'scroll-close')
if self.in_inspect:
self.on_inspect()
if self.in_diff:
self.on_diff()
if self.in_top:
self.on_top()
elif event == 'toggle-show-all':
self.on_all()
self.monitored.get_containers()
elif event == 'delete-container':
self.dict_on_delete()
elif event == 'commit-container':
self.on_commit()
elif event == 'inspect-details':
self.in_inspect = True
self.on_inspect()
elif event == 'set-mark':
self.on_mark()
elif event == 'run-container(s)':
self.on_run()
elif event == 'unmark-containers':
self.on_unmark()
elif event == 'rename-container':
self.on_rename()
self.monitored.get_containers()
elif event == 'inspect-changes':
self.in_diff = True
self.on_diff()
elif event == 'restart-container':
self.on_restart()
elif event == 'kill-container':
self.on_kill()
elif event == 'pause-container':
self.on_pause()
elif event == 'unpause-container':
self.on_unpause()
elif event == 'start-container':
self.on_start()
elif event == 'stop-container':
self.on_stop()
elif event == 'top-container':
self.in_top = True
self.on_top()
else:
return super(ContainerPane, self).handle_event(event)
def make_screen_command(self):
row = 0
none_marked = True
for k, v in self.marked_containers.items():
if v == "marked" and 'Exited' not in k.status:
self.commands += "screen %d docker exec -it %s bash\n" % (row, k.container)
self.commands += "title %s\n" % k.image
row += 1
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
if 'Exited' in widget.status:
return
self.commands += "screen 0 docker exec -it %s bash\n" % widget.container
self.commands += "title %s\n" % widget.image
self.commands += "caption always\n"
temp = tempfile.NamedTemporaryFile()
with open(temp.name, "wt") as fout:
fout.write(self.commands)
if self.commands == "":
return
subprocess.call(["screen", "-c" "%s" % temp.name])
temp.close()
app.client.close()
raise urwid.ExitMainLoop
def make_tmux_command(self):
self.commands += "#!/bin/bash\n"
self.commands += "tmux new-session -d -s run-containers\n"
row = 1
none_marked = True
for k, v in self.marked_containers.items():
if v == "marked" and 'Exited' not in k.status:
self.commands += "tmux new-window -t run-containers:%d -n '%s' 'docker exec -it %s bash'\n" % (row, k.image, k.container)
row += 1
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
if 'Exited' in widget.status:
return
self.commands += "tmux new-window -t run-containers:1 -n '%s' 'docker exec -it %s bash'\n" % (widget.image, widget.container)
self.commands += "tmux select-window -t run-containers:1\n"
self.commands += "tmux -2 attach-session -t run-containers\n"
temp = tempfile.NamedTemporaryFile()
with open(temp.name, "wt") as fout:
fout.write(self.commands)
if self.commands == "":
return
subprocess.call(["rbash", "%s" % temp.name])
temp.close()
app.client.close()
raise urwid.ExitMainLoop
def make_command(self, which_mux):
if (which_mux == "screen" or which_mux == "tmux or screen?: screen"
or which_mux == "tmux or screen?:screen"):
self.make_screen_command()
elif (which_mux == "tmux" or which_mux == "tmux or screen?: tmux"
or which_mux == "tmux or screen?:tmux"):
self.make_tmux_command()
else:
self.on_run()
def on_run(self):
which_mux = "tmux or screen?: "
prompt = Prompt(lambda which_mux: self.make_command(which_mux), title="Run Container:", initial=which_mux)
self.show_dialog(prompt)
def on_next(self):
self.listing.next()
def on_prev(self):
self.listing.prev()
def on_mark(self):
marked_widget, marked_id = self.listing.get_focus()
if (marked_widget in self.marked_containers and
self.marked_containers[marked_widget] == "marked"):
del self.marked_containers[marked_widget]
self.listing.unmark()
else:
self.marked_containers[marked_widget] = "marked"
self.listing.mark()
def on_unmark(self):
for key, value in self.marked_containers.items():
if value == "marked":
key.set_attr_map({None:None})
del self.marked_containers[key]
def on_all(self):
self.on_unmark()
self.monitored.all = not self.monitored.all
def dict_on_delete(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
widget = key
self.on_delete(widget)
del self.marked_containers[key]
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.on_delete(widget)
@catch_docker_errors
def on_delete(self, widget):
highlighter.apply(widget, 'deleted', 'deleted')
reactor.callLater(2.5, highlighter.remove, widget)
return threads.deferToThread(app.client.remove_container, widget.container)
@catch_docker_errors
def perform_start(self, widget):
return threads.deferToThread(app.client.start, widget)
def on_start(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_start(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_start(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_stop(self, widget):
return threads.deferToThread(app.client.stop, widget)
def on_stop(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_stop(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_stop(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_pause(self, widget):
return threads.deferToThread(app.client.pause, widget)
def on_pause(self):
none_marked = True
if len(self.marked_containers) > 0:
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_pause(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_pause(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_unpause(self, widget):
return threads.deferToThread(app.client.unpause, widget)
def on_unpause(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_unpause(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_unpause(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_kill(self, widget):
return threads.deferToThread(app.client.kill, widget)
def on_kill(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_kill(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_kill(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_commit(self, container, repo_name):
name, tag = split_repo_name(repo_name)
repo_name = name + ":" + (tag or 'latest')
self.close_dialog()
return threads.deferToThread(app.client.commit, container, name, tag or 'latest')
def on_commit(self):
widget, idx = self.listing.get_focus()
name, tag = split_repo_name(widget.image)
prompt = Prompt(lambda name: self.perform_commit(widget.container, name), title="Tag Container", initial=name)
self.show_dialog(prompt)
@catch_docker_errors
def perform_restart(self, widget):
return threads.deferToThread(app.client.restart, widget)
def on_restart(self):
none_marked = True
for key, value in self.marked_containers.items():
if value == "marked":
self.perform_restart(key.container)
none_marked = False
if none_marked:
widget, idx = self.listing.get_focus()
self.perform_restart(widget.container)
else:
self.on_unmark()
@catch_docker_errors
def perform_rename(self, container, name):
self.close_dialog()
return threads.deferToThread(app.client.rename, container, name)
def on_rename(self):
widget, idx = self.listing.get_focus()
name = clean_name(widget.name[0])
prompt = Prompt(lambda name: self.perform_rename(widget.container, name), title="Rename Container", initial=name)
self.show_dialog(prompt)
self.monitored.get_containers()
@catch_docker_errors
def on_inspect(self):
widget, idx = self.listing.get_focus()
d = threads.deferToThread(app.client.inspect_container, widget.container)
d.addCallback(lambda data: self.show_dialog(ContainerInspector(data)))
return d
def _show_diff(self, diff_json, container_id):
for d in diff_json:
if d['Kind'] == 0:
d['Kind'] = 'Change'
elif d['Kind'] == 1:
d['Kind'] = 'Add'
elif d['Kind'] == 2:
d['Kind'] = 'Delete'
diffs = [(d.get('Kind',''), d.get('Path','')) for d in diff_json]
dialog = TableDialog(
"Changes in %s" % container_id[:12],
diffs,
[
{'value':"kind", 'weight':1, 'align':'center'},
{'value':"path", 'weight':4, 'align':'center'}
]
)
dialog.width = ('relative', 90)
self.show_dialog(dialog, )
@catch_docker_errors
def on_diff(self):
widget, idx = self.listing.get_focus()
d = threads.deferToThread(app.client.diff, widget.container)
d.addCallback(self._show_diff, widget.container)
d.addCallback(lambda r: app.draw_screen())
return d
def _show_top(self, top_json, container_id):
processes = top_json.get('Processes','')
titles = top_json.get('Titles','')
dialog = TableDialog(
"Running Processes in %s" % container_id[:12],
processes,
[
{'value':titles[0], 'weight':3, 'align':'center'},
{'value':titles[1], 'weight':3, 'align':'center'},
{'value':titles[2], 'weight':3, 'align':'center'},
{'value':titles[3], 'weight':1, 'align':'center'},
{'value':titles[4], 'weight':3, 'align':'center'},
{'value':titles[5], 'weight':3, 'align':'center'},
{'value':titles[6], 'weight':3, 'align':'center'},
{'value':titles[7], 'weight':10, 'align':'center'},
]
)
dialog.width = ('relative', 90)
self.show_dialog(dialog, )
@catch_docker_errors
def on_top(self):
widget, idx = self.listing.get_focus()
d = threads.deferToThread(app.client.top, widget.container)
d.addCallback(self._show_top, widget.container)
d.addCallback(lambda r: app.draw_screen())
return d
def listener(self):
s = socket.socket(socket.AF_UNIX)
s.connect('/var/run/docker.sock')
with closing(s):
s.sendall(b'GET /events?since=%d HTTP/1.1\n\n' % self.since_time)
header = s.recv(4096)
chunk = s.recv(4096)
self.monitored.get_containers()
|
local_benchmark.py
|
# Copyright (c) The Libra Core Contributors
# SPDX-License-Identifier: Apache-2.0
# Sample benchmark to profile performance and observe bottlenecks.
#
# Run as:
# $ python -m cProfile -s tottime src/scripts/run_perf.py > report.txt
#
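# A hedged sketch (not part of the original benchmark): the same profiling can
# also be done programmatically with the standard-library cProfile/pstats
# modules. The default target callable below is purely illustrative.
def _profile_sketch(target=lambda: sum(range(100000)), top=10):
    import cProfile
    import pstats
    profiler = cProfile.Profile()
    profiler.enable()
    target()
    profiler.disable()
    # Sort by time spent inside each function and print the hottest entries.
    pstats.Stats(profiler).sort_stats('tottime').print_stats(top)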
from ..business import VASPInfo
from ..libra_address import LibraAddress
from ..payment_logic import PaymentCommand
from ..status_logic import Status
from ..sample.sample_db import SampleDB
from ..payment import PaymentAction, PaymentActor, PaymentObject, StatusObject
from ..core import Vasp
from .basic_business_context import TestBusinessContext
from ..crypto import ComplianceKey
from threading import Thread
import time
import asyncio
# A standalone performance test.
PeerA_addr = LibraAddress.from_bytes("lbr", b'A'*16)
PeerB_addr = LibraAddress.from_bytes("lbr", b'B'*16)
peer_address = {
PeerA_addr.as_str(): 'http://localhost:8091',
PeerB_addr.as_str(): 'http://localhost:8092',
}
peer_keys = {
PeerA_addr.as_str(): ComplianceKey.generate(),
PeerB_addr.as_str(): ComplianceKey.generate(),
}
class SimpleVASPInfo(VASPInfo):
def __init__(self, my_addr):
self.my_addr = my_addr
def get_peer_base_url(self, other_addr):
assert other_addr.as_str() in peer_address
return peer_address[other_addr.as_str()]
def get_peer_compliance_verification_key(self, other_addr):
key = ComplianceKey.from_str(peer_keys[other_addr].export_pub())
assert not key._key.has_private
return key
def get_my_compliance_signature_key(self, my_addr):
return peer_keys[my_addr]
def is_authorised_VASP(self, certificate, other_addr):
return True
def start_thread_main(vasp, loop):
# Initialize the VASP services.
vasp.start_services()
try:
# Start the loop
loop.run_forever()
finally:
# Do clean up
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
print('VASP loop exit...')
def make_new_VASP(Peer_addr, port, reliable=True):
VASPx = Vasp(
Peer_addr,
host='localhost',
port=port,
business_context=TestBusinessContext(Peer_addr, reliable=reliable),
info_context=SimpleVASPInfo(Peer_addr),
database=SampleDB())
loop = asyncio.new_event_loop()
VASPx.set_loop(loop)
# Create and launch a thread with the VASP event loop
t = Thread(target=start_thread_main, args=(VASPx, loop))
t.start()
print(f'Start Node {port}')
# Block until the event loop in the thread is running.
VASPx.wait_for_start()
return (VASPx, loop, t)
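# Hedged illustration (standard library only) of the pattern used above: run an
# asyncio event loop on a background thread and submit coroutines to it from the
# calling thread via run_coroutine_threadsafe. The names below are illustrative
# and not part of the benchmark.
def _background_loop_sketch():
    loop = asyncio.new_event_loop()
    t = Thread(target=loop.run_forever, daemon=True)
    t.start()
    async def ping():
        await asyncio.sleep(0.01)
        return 'pong'
    # Schedule the coroutine on the background loop and block for its result.
    fut = asyncio.run_coroutine_threadsafe(ping(), loop)
    result = fut.result(timeout=1.0)
    loop.call_soon_threadsafe(loop.stop)
    return result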
async def main_perf(messages_num=10, wait_num=0, verbose=False):
VASPa, loopA, tA = make_new_VASP(PeerA_addr, port=8091)
VASPb, loopB, tB = make_new_VASP(PeerB_addr, port=8092, reliable=False)
# Get the channel from A -> B
channelAB = VASPa.vasp.get_channel(PeerB_addr)
channelBA = VASPb.vasp.get_channel(PeerA_addr)
# Define a payment command
commands = []
payments = []
for cid in range(messages_num):
peerA_addr = PeerA_addr.as_str()
sub_a = LibraAddress.from_bytes("lbr", b'A'*16, b'a'*8).as_str()
sub_b = LibraAddress.from_bytes("lbr", b'B'*16, b'b'*8).as_str()
sender = PaymentActor(sub_a, StatusObject(Status.needs_kyc_data), [])
receiver = PaymentActor(sub_b, StatusObject(Status.none), [])
action = PaymentAction(10, 'TIK', 'charge', 984736)
payment = PaymentObject(
sender, receiver, f'{peerA_addr}_ref{cid:08d}', None, 'Description ...', action
)
kyc_data = asyncio.run_coroutine_threadsafe(VASPa.bc.get_extended_kyc(payment), loopA)
kyc_data = kyc_data.result()
payment.sender.add_kyc_data(kyc_data)
payments += [payment]
cmd = PaymentCommand(payment)
commands += [cmd]
async def send100(nodeA, commands):
res = await asyncio.gather(
*[nodeA.new_command_async(VASPb.my_addr, cmd) for cmd in commands],
return_exceptions=True)
return res
async def wait_for_all_payment_outcome(nodeA, payments, results):
fut_list = [nodeA.wait_for_payment_outcome_async(
p.reference_id, timeout=60.0) for p,r in zip(payments, results)]
res = await asyncio.gather(
*fut_list,
return_exceptions=True)
return res
    # Execute the requests
print('Inject commands')
s = time.perf_counter()
results = asyncio.run_coroutine_threadsafe(send100(VASPa, commands), loopA)
results = results.result()
# Print the result for all initial commands
    if verbose:
for res in results:
print('RES:', res)
elapsed = (time.perf_counter() - s)
print('Wait for all payments to have an outcome')
outcomes = asyncio.run_coroutine_threadsafe(
wait_for_all_payment_outcome(VASPa, payments, results), loopA)
outcomes = outcomes.result()
# Print the result for all requests
if verbose:
for out, res in zip(outcomes, results):
if not isinstance(out, Exception):
print('OUT OK:', out.sender.status.as_status(), out.receiver.status.as_status())
else:
print('OUT NOTOK:', type(out), str(out))
print('All payments done.')
# Print some statistics
success_number = sum([1 for r in results if type(r) == bool and r])
print(f'Commands executed in {elapsed:0.2f} seconds.')
print(f'Success #: {success_number}/{len(commands)}')
# In case you want to wait for other responses to settle
#
wait_for = wait_num
for t in range(wait_for):
print('waiting', t)
await asyncio.sleep(1.0)
# Check that all the payments have been processed and stored.
for payment in payments:
ref = payment.reference_id
_ = VASPa.get_payment_by_ref(ref)
hist = VASPa.get_payment_history_by_ref(ref)
if verbose:
if len(hist) > 1:
print('--'*40)
for p in hist:
print(p.pretty())
print(f'Estimate throughput #: {len(commands)/elapsed} Tx/s')
# Close the loops
VASPa.close()
VASPb.close()
# List the remaining retransmits
rAB = channelAB.pending_retransmit_number()
rBA = channelBA.pending_retransmit_number()
print(f'Pending retransmit: VASPa {rAB} VASPb {rBA}')
|
Colector_dockwidget.py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ColectorGPSDockWidget
A QGIS plugin
 A plugin that enables the collection of spatial and alphanumeric information in QGIS
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2021-03-04
git sha : https://github.com/AlexanderPretel/Colector
copyright : (C) 2021 by Alexander Pretel Díaz
email : alexander.pretel@correounivalle.edu.co
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import required libraries
import os
import subprocess
import threading
from qgis.utils import *
from qgis.core import *
from qgis.gui import *
from qgis.PyQt import QtGui, QtWidgets, uic
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.PyQt.QtWidgets import *
from datetime import datetime
import time
import concurrent.futures
# Loads the application's graphical interface and provides access to the objects that control it and interact with the user
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'Colector_dockwidget_base.ui'))
class ColectorGPSDockWidget(QtWidgets.QDockWidget, FORM_CLASS):
closingPlugin = pyqtSignal()
    # Initialization function that acts as the constructor of the program, its objects and variables
def __init__(self, parent=None):
"""Constructor."""
        # Elements that feed the interface and declaration of global variables
self.iface = iface
self.tools = self.iface.vectorLayerTools()
self.prj = QgsProject().instance()
super(ColectorGPSDockWidget, self).__init__(parent)
self.setupUi(self)
self.src_crs = QgsCoordinateReferenceSystem("EPSG:4326")
self.prj.setCrs(self.src_crs)
self.dest_crs = self.prj.crs()
self.transformation = QgsCoordinateTransform(self.src_crs, self.dest_crs, self.prj)
self.marker = None
self.marker_polyline = None
self.marker_polygon = None
self.gpsCon = None
self.detector = None
self.gps_active = False
self.colectar = False
self.listaCoordenadas = None
self.vertice = False
self.field_combo_box = QgsFieldComboBox()
self.canvas = self.iface.mapCanvas()
self.mMapLayerComboBox.setFilters(QgsMapLayerProxyModel.VectorLayer)
self.openProject = False
self.homeFolder = str(os.getcwd())
self.nameProject = None
self.pathProject = None
self.folder = datetime.now().strftime('%d_%m_%y')
self.pathFolder = None
if not os.path.exists("{0}/Proyectos/".format(str(self.homeFolder))):
os.mkdir("{0}/Proyectos/".format(str(self.homeFolder)))
        self.tiempoCaptura = 5  # The capture time defaults to 5 seconds, but the user can change it
self.btnubicar.clicked.connect(self.locate)
self.mMapLayerComboBox.layerChanged.connect(self.field_combo_box.setLayer)
self.btnubicarycentrar.clicked.connect(self.collect)
self.btnaddVertice.clicked.connect(self.add_new_vertex)
self.btnendGeometry.clicked.connect(self.create_geometry_polygon_or_line)
self.crearProyecto.clicked.connect(self.newProject)
self.cargarProyecto.clicked.connect(self.existProject)
self.btnImportarCapas.clicked.connect(self.importCapas)
def newProject(self):
self.pathProject = QtWidgets.QFileDialog.getExistingDirectory(None, 'Crear proyecto', str(self.homeFolder) + "/Proyectos/")
if not self.pathProject == '':
self.tiempoCaptura = self.tiempoColeccion.currentText()
self.openProject = True
self.enableProject()
self.folder = datetime.now().strftime('%d_%m_%y')
if not os.path.exists("{0}/Base/".format(self.pathProject)):
os.mkdir("{0}/Base/".format(self.pathProject))
if not os.path.exists("{0}/Export/".format(self.pathProject)):
os.mkdir("{0}/Export/".format(self.pathProject))
if not os.path.exists("{0}/Survey/".format(self.pathProject)):
os.mkdir("{0}/Survey/".format(self.pathProject))
self.pathFolder = "{0}/Survey/{1}/".format(self.pathProject, str(self.folder))
if not os.path.exists(self.pathFolder):
os.mkdir(self.pathFolder)
qgzProject = "{0}/Qgs.qgz".format(self.pathProject)
self.prj.write(qgzProject)
self.prj.setTitle(str(self.pathProject))
else:
self.iface.messageBar().pushMessage(u'Colector Infomra', u'No se ha podido crear el proyecto',
level=Qgis.Critical)
def existProject(self):
self.pathProject = QtWidgets.QFileDialog.getExistingDirectory(None, 'Abrir proyecto', str(self.homeFolder) + "/Proyectos/")
if not self.pathProject == '':
self.prj.read("{0}/Qgs.qgz".format(self.pathProject))
self.openProject = True
self.enableProject()
if not os.path.exists("{0}/Base/".format(self.pathProject)):
os.mkdir("{0}/Base/".format(self.pathProject))
if not os.path.exists("{0}/Export/".format(self.pathProject)):
os.mkdir("{0}/Export/".format(self.pathProject))
if not os.path.exists("{0}/Survey/".format(self.pathProject)):
os.mkdir("{0}/Survey/".format(self.pathProject))
self.folder = datetime.now().strftime('%d_%m_%y')
self.pathFolder = "{0}/Survey/{1}/".format(self.pathProject, str(self.folder))
print(self.pathFolder)
if not os.path.exists(self.pathFolder):
os.mkdir(self.pathFolder)
self.prj.setTitle(str(self.pathProject))
else:
self.iface.messageBar().pushMessage(u'Colector Infomra',
u'Hubo un problema con el proyecto. Vuelve a importarlo o crea uno nuevo',
level=Qgis.Critical)
def importCapas(self):
dirBase = "{0}/Base/".format(self.pathProject)
if os.path.exists(dirBase):
listDir = os.walk(dirBase)
for root, dirs, files in listDir:
for fichero in files:
(nombre, extension) = os.path.splitext(fichero)
if extension == ".shp":
layer = QgsVectorLayer(root+nombre+extension, nombre, "ogr")
if not layer.isValid():
print("Layer failed to load!")
else:
self.prj.addMapLayer(layer)
elif extension == ".tiff" or extension == ".tif" or extension == ".TIF" or extension == ".TIFF" or extension == ".ecw" or extension == ".mbtiles":
layer = QgsRasterLayer(root+nombre+extension, nombre)
if not layer.isValid():
print("Layer failed to load!")
else:
self.prj.addMapLayer(layer)
def enableProject(self):
if self.openProject:
self.tabWidgetPage2.setEnabled(True)
self.tiempoColeccion.setEnabled(True)
self.btnImportarCapas.setEnabled(True)
else:
self.tabWidgetPage2.setEnabled(False)
self.tiempoColeccion.setEnabled(False)
self.btnImportarCapas.setEnabled(False)
def locate(self):
try:
if self.btnubicar.text() == "Ubicar":
self.btnubicar.setText("Desconectar")
print('GPS ubicar')
if self.marker is not None:
self.canvas.scene().removeItem(self.marker)
self.detector = QgsGpsDetector("/dev/ttyACM0")
# print(self.detector)
self.detector.detected[QgsGpsConnection].connect(self.connection_succeed)
self.detector.detectionFailed.connect(self.connection_failed)
self.detector.advance()
elif self.btnubicar.text() == "Desconectar":
self.deactivate()
except Exception as e:
print(e)
def deactivate(self):
try:
print('GPS deactivated')
if self.gpsCon is not None:
self.gpsCon.close()
self.btnubicar.setText("Ubicar")
if self.canvas is not None:
self.marker.setColor(QColor(0, 0, 255))
# self.canvas.scene().removeItem(self.marker)
self.gps_active = False
except Exception as e:
print(e)
def connection_succeed(self, connection):
try:
print("GPS connected")
self.gps_active = True
self.gpsCon = connection
self.prj.setCrs(self.src_crs)
if not self.colectar and not self.vertice:
self.gpsCon.stateChanged.connect(self.status_changed)
elif self.colectar and not self.vertice:
self.gpsCon.stateChanged.connect(self.create_geometry)
elif self.vertice:
self.gpsCon.stateChanged.connect(self.create_vertex)
# marker
self.marker = QgsVertexMarker(self.canvas)
self.marker.setColor(QColor(255, 0, 0)) # (R,G,B)
self.marker.setIconSize(10)
self.marker.setIconType(QgsVertexMarker.ICON_CIRCLE)
self.marker.setPenWidth(3)
except Exception as e:
print(e)
def status_changed(self, gpsInfo):
try:
if gpsInfo.latitude != 0 and gpsInfo.longitude != 0:
wgs84_pointXY = QgsPointXY(gpsInfo.longitude, gpsInfo.latitude)
wgs84_point = QgsPoint(wgs84_pointXY)
wgs84_point.transform(self.transformation)
mapPointXY = QgsPointXY(wgs84_point.x(), wgs84_point.y())
if gpsInfo.pdop >= 1: # gps ok
self.marker.setColor(QColor(0, 200, 0))
else:
self.marker.setColor(QColor(255, 0, 0))
self.marker.setCenter(mapPointXY)
self.marker.show()
self.canvas.refresh()
self.canvas.setCenter(mapPointXY)
except Exception as e:
print(e)
def create_geometry(self, gpsInfo):
try:
if self.colectar and gpsInfo.isValid():
if gpsInfo.latitude != 0 and gpsInfo.longitude != 0:
self.status_changed(gpsInfo)
layer = self.field_combo_box.layer()
self.tiempoCaptura = self.tiempoColeccion.currentText()
self.prj.setCrs(self.src_crs)
if layer.geometryType() == QgsWkbTypes.PointGeometry:
print("colectar punto: lat ", gpsInfo.latitude, " lon ", gpsInfo.longitude)
self.colectar = False
field_idx = layer.fields().indexOf('id')
layer.startEditing()
if field_idx == -1:
layer.addAttribute(QgsField('id', QVariant.Int))
field_idx = layer.fields().indexOf('id')
if layer.featureCount() == 0:
idActual = 1
elif layer.featureCount() > 0:
idActual = int(max([num[field_idx] for num in layer.getFeatures()])) + 1
config = layer.editFormConfig()
if not config.readOnly(field_idx):
config.setReadOnly(field_idx, True)
layer.setEditFormConfig(config)
feature = QgsFeature(layer.fields())
feature.setAttribute(field_idx, idActual)
feature.setGeometry(QgsPoint(gpsInfo.longitude, gpsInfo.latitude))
self.deactivate()
form = QgsAttributeDialog(layer, feature, False)
form.setMode(QgsAttributeEditorContext.AddFeatureMode)
result = form.exec_()
if result == 1:
atributos = form.feature().attributes()
future = concurrent.futures.ThreadPoolExecutor().submit(self.collect_ubx, layer, idActual)
#thread = threading.Thread(target=self.collect_ubx, args=(layer, idActual))
#thread.start()
progressMessageBar = self.iface.messageBar().createMessage("Colectando ubx...")
progress = QProgressBar()
progress.setMaximum(int(self.tiempoCaptura)+1)
progressMessageBar.layout().addWidget(progress)
self.iface.messageBar().pushWidget(progressMessageBar, Qgis.Info)
for i in range(int(self.tiempoCaptura)+1):
time.sleep(1)
progress.setValue(i + 1)
self.iface.messageBar().clearWidgets()
if future.result() == 0:
#print(future.result())
self.iface.messageBar().pushInfo(u'Colector Infomra',
u'Punto agregado en {0}'.format(str(layer.name())))
self.create_txt_with_features(layer, attr=atributos)
layer.updateExtents()
self.tools.saveEdits(layer)
self.prj.addMapLayer(layer)
self.tools.stopEditing(layer, False)
self.canvas.refresh()
elif result == 0:
self.iface.messageBar().pushMessage(u'Colector Infomra', u'No se pudo colectar el punto',
level=Qgis.Warning)
self.tools.stopEditing(layer, False)
elif layer.geometryType() == QgsWkbTypes.LineGeometry or layer.geometryType() == QgsWkbTypes.PolygonGeometry:
self.tableVertices.clearContents()
#print(self.listaCoordenadas)
self.listaCoordenadas = []
#print(self.listaCoordenadas)
self.colectar = False
self.mMapLayerComboBox.setEnabled(False)
self.tableVertices.setEnabled(True)
self.btnaddVertice.setEnabled(True)
self.btnubicarycentrar.setText("Cancelar")
id = len(self.listaCoordenadas) + 1
self.listaCoordenadas.append([id, gpsInfo.latitude, gpsInfo.longitude, gpsInfo.elevation])
print(self.listaCoordenadas)
rowPosition = self.tableVertices.rowCount()
self.tableVertices.setRowCount(rowPosition + 1)
self.tableVertices.setItem(rowPosition, 0, QTableWidgetItem(str(id)))
self.tableVertices.setItem(rowPosition, 1, QTableWidgetItem(str(gpsInfo.latitude)))
self.tableVertices.setItem(rowPosition, 2, QTableWidgetItem(str(gpsInfo.longitude)))
self.tableVertices.setItem(rowPosition, 3, QTableWidgetItem(str(gpsInfo.elevation)))
self.deactivate()
field_idx = layer.fields().indexOf('id')
layer.startEditing()
if field_idx == -1:
layer.addAttribute(QgsField('id', QVariant.Int))
field_idx = layer.fields().indexOf('id')
if layer.featureCount() == 0:
idActual = 1
elif layer.featureCount() > 0:
idActual = int(max([num[field_idx] for num in layer.getFeatures()])) + 1
future = concurrent.futures.ThreadPoolExecutor().submit(self.collect_ubx, layer, idActual, id)
progressMessageBar = self.iface.messageBar().createMessage("Colectando ubx...")
progress = QProgressBar()
progress.setMaximum(int(self.tiempoCaptura) + 1)
progressMessageBar.layout().addWidget(progress)
self.iface.messageBar().pushWidget(progressMessageBar, Qgis.Info)
for i in range(int(self.tiempoCaptura) + 1):
time.sleep(1)
progress.setValue(i + 1)
self.iface.messageBar().clearWidgets()
if future.result() == 0:
self.iface.messageBar().pushInfo(u'Colector Infomra', u'Vértice agregado')
layer.updateExtents()
self.tools.saveEdits(layer)
self.prj.addMapLayer(layer)
self.tools.stopEditing(layer, False)
self.canvas.refresh()
elif not self.colectar:
self.deactivate()
except ValueError as e:
self.deactivate()
print(e)
def create_geometry_polygon_or_line(self):
layer = self.field_combo_box.layer()
if layer.geometryType() == QgsWkbTypes.PolygonGeometry:
points = []
layer.startEditing()
for iterador in self.listaCoordenadas:
if iterador[2] != 0 and iterador[1] != 0:
points.append(QgsPointXY(iterador[2], iterador[1]))
field_idx = layer.fields().indexOf('id')
if field_idx == -1:
layer.addAttribute(QgsField('id', QVariant.Int))
field_idx = layer.fields().indexOf('id')
if layer.featureCount() == 0:
idActual = 1
elif layer.featureCount() > 0:
idActual = int(max([num[field_idx] for num in layer.getFeatures()])) + 1
config = layer.editFormConfig()
            if not config.readOnly(field_idx):
                config.setReadOnly(field_idx, True)
layer.setEditFormConfig(config)
feature = QgsFeature(layer.fields())
feature.setGeometry(QgsGeometry.fromPolygonXY([points]))
feature.setAttribute(field_idx, idActual)
form = QgsAttributeDialog(layer, feature, False)
form.setMode(QgsAttributeEditorContext.AddFeatureMode)
result = form.exec_()
if result == 1:
atributos = form.feature().attributes()
self.iface.messageBar().pushInfo(u'Colector Infomra', u'Poligono agregado correctamente')
elif result == 0:
self.iface.messageBar().pushMessage(u'Colector Infomra', u'No se pudo colectar el Poligono',
level=Qgis.Warning)
layer.updateExtents()
self.tools.saveEdits(layer)
self.prj.addMapLayer(layer)
self.tools.stopEditing(layer, False)
self.canvas.refresh()
self.canvas.scene().removeItem(self.marker_polygon)
print("El polígono se compone por los siguientes vértices ", self.listaCoordenadas)
elif layer.geometryType() == QgsWkbTypes.LineGeometry:
points = []
layer.startEditing()
for iterador in self.listaCoordenadas:
if iterador[2] != 0 and iterador[1] != 0:
points.append(QgsPoint(iterador[2], iterador[1]))
field_idx = layer.fields().indexOf('id')
if field_idx == -1:
layer.addAttribute(QgsField('id', QVariant.Int))
field_idx = layer.fields().indexOf('id')
if layer.featureCount() == 0:
idActual = 1
elif layer.featureCount() > 0:
idActual = int(max([num[field_idx] for num in layer.getFeatures()])) + 1
config = layer.editFormConfig()
if not config.readOnly(field_idx):
config.setReadOnly(field_idx, True)
layer.setEditFormConfig(config)
feature = QgsFeature(layer.fields())
feature.setGeometry(QgsGeometry.fromPolyline(points))
print(field_idx)
feature.setAttribute(field_idx, idActual)
form = QgsAttributeDialog(layer, feature, False)
form.setMode(QgsAttributeEditorContext.AddFeatureMode)
result = form.exec_()
if result == 1:
atributos = form.feature().attributes()
self.iface.messageBar().pushInfo(u'Colector Infomra', u'Línea agregada correctamente')
elif result == 0:
self.iface.messageBar().pushMessage(u'Colector Infomra', u'No se pudo colectar la Línea',
level=Qgis.Warning)
layer.updateExtents()
self.tools.saveEdits(layer)
self.prj.addMapLayer(layer)
self.tools.stopEditing(layer, False)
self.canvas.refresh()
self.canvas.scene().removeItem(self.marker_polyline)
print("La línea se compone por los siguientes vértices ", self.listaCoordenadas)
self.create_txt_with_features(layer, attr=atributos)
self.tableVertices.clearContents()
self.tableVertices.setRowCount(0)
self.btnaddVertice.setEnabled(False)
self.btnendGeometry.setEnabled(False)
self.tableVertices.setEnabled(False)
self.mMapLayerComboBox.setEnabled(True)
self.btnubicarycentrar.setText("Colectar")
def create_vertex(self, gpsInfo):
if self.vertice and gpsInfo.isValid():
if gpsInfo.latitude != 0 and gpsInfo.longitude != 0:
self.tiempoCaptura = self.tiempoColeccion.currentText()
print("create vertex")
self.status_changed(gpsInfo)
id = len(self.listaCoordenadas) + 1
self.listaCoordenadas.append([id, gpsInfo.latitude, gpsInfo.longitude, gpsInfo.elevation])
rowPosition = self.tableVertices.rowCount()
self.tableVertices.setRowCount(rowPosition + 1)
self.tableVertices.setItem(rowPosition, 0, QTableWidgetItem(str(id)))
self.tableVertices.setItem(rowPosition, 1, QTableWidgetItem(str(gpsInfo.latitude)))
self.tableVertices.setItem(rowPosition, 2, QTableWidgetItem(str(gpsInfo.longitude)))
self.tableVertices.setItem(rowPosition, 3, QTableWidgetItem(str(gpsInfo.elevation)))
self.btnendGeometry.setEnabled(True)
self.vertice = False
if self.field_combo_box.layer().featureCount() == 0:
idActual = 1
elif self.field_combo_box.layer().featureCount() > 0:
idActual = int(max([num[0] for num in self.field_combo_box.layer().getFeatures()])) + 1
if self.field_combo_box.layer().geometryType() == QgsWkbTypes.LineGeometry:
if self.marker_polyline is None:
print(self.marker_polyline)
self.marker_polyline = QgsRubberBand(self.canvas, QgsWkbTypes.LineGeometry)
print(self.marker_polyline)
self.marker_polyline.addPoint(
QgsPointXY(self.listaCoordenadas[0][2], self.listaCoordenadas[0][1]))
self.marker_polyline.setColor(QColor(0, 0, 255, 80))
self.marker_polyline.setWidth(5)
self.marker_polyline.show()
# self.canvas.scene().removeItem(self.marker_polyline)
self.marker_polyline.addPoint(QgsPointXY(gpsInfo.longitude, gpsInfo.latitude))
elif self.field_combo_box.layer().geometryType() == QgsWkbTypes.PolygonGeometry:
if self.marker_polygon is None:
print(QColor(0, 0, 255, 127))
self.marker_polygon = QgsRubberBand(self.canvas, QgsWkbTypes.PolygonGeometry)
print(self.marker_polygon)
self.marker_polygon.addPoint(
QgsPointXY(self.listaCoordenadas[0][2], self.listaCoordenadas[0][1]))
self.marker_polygon.setColor(QColor(0, 0, 255, 80))
self.marker_polygon.setWidth(2)
self.marker_polygon.show()
# self.canvas.scene().removeItem(self.marker_polygon)
self.marker_polygon.addPoint(QgsPointXY(gpsInfo.longitude, gpsInfo.latitude))
self.deactivate()
future = concurrent.futures.ThreadPoolExecutor().submit(self.collect_ubx, self.field_combo_box.layer(), idActual, id)
progressMessageBar = self.iface.messageBar().createMessage("Colectando ubx...")
progress = QProgressBar()
progress.setMaximum(int(self.tiempoCaptura) + 1)
progressMessageBar.layout().addWidget(progress)
self.iface.messageBar().pushWidget(progressMessageBar, Qgis.Info)
for i in range(int(self.tiempoCaptura) + 1):
time.sleep(1)
progress.setValue(i + 1)
self.iface.messageBar().clearWidgets()
if future.result() == 0:
self.iface.messageBar().pushInfo(u'Colector Infomra', u'Vértice agregado')
elif not self.vertice:
self.deactivate()
def connection_failed(self):
if not self.gps_active:
print('GPS connection failed')
self.iface.messageBar().pushMessage(u'Colector Infomra', u'No ha sido posible conectar con el receptor',
level=Qgis.Critical)
self.deactivate()
def collect(self):
if self.field_combo_box.layer() is not None:
if self.btnubicarycentrar.text() == "Cancelar":
self.cancell()
else:
self.colectar = True
self.locate()
else:
self.iface.messageBar().pushMessage("Error", "Debe cargar una capa vectorial al proyecto", Qgis.Critical)
def create_txt_with_features(self, layer: QgsVectorLayer, attr):
#archivoTxt = self.pathFolder + layer.name() + "/" + layer.name() +".txt"
archivoTxt = "{0}{1}/{1}.txt".format(str(self.pathFolder), str(layer.name()))
fields = layer.fields()
if not os.path.exists(archivoTxt):
file = open(archivoTxt, "w")
#file.write(str(layer.fields().names()).replace('[', '').replace(']', '').replace(' ', '') + os.linesep)
else:
file = open(archivoTxt, "a")
lista = []
for iterador in range(len(attr)):
field = fields[iterador]
nombreField = field.name()
type = field.typeName()
atributo = attr[iterador]
lista.append(nombreField)
lista.append([atributo, type])
text = {lista[i]: lista[i+1] for i in range(0, len(lista), 2)}
file.write(str(text) + os.linesep)
file.close()
self.create_shapefile(layer)
def collect_ubx(self, layer: QgsVectorLayer, idActual: int, vertice: int = 1):
if not os.path.exists(self.pathFolder):
os.mkdir(self.pathFolder)
plugin_dir = os.path.dirname(__file__)
ruta = self.pathFolder + layer.name() + '/'
if layer.geometryType() == QgsWkbTypes.PointGeometry:
sentencia = "python3 {0}/collect_ubx.py colectar_punto {1} {2}.ubx {3}".format(str(plugin_dir), str(ruta),
str(idActual), str(self.tiempoCaptura))
else:
sentencia = "python3 {0}/collect_ubx.py colectar_punto {1} {2}_{3}.ubx {4}".format(str(plugin_dir), str(ruta),
str(idActual), str(vertice), str(self.tiempoCaptura))
bash = os.system(sentencia)
return bash
def add_new_vertex(self):
self.vertice = True
self.locate()
def create_shapefile(self, layer: QgsVectorLayer):
nameNewLayer = str(self.pathProject) + '/Export/' + str(layer.name()) + ".shp"
if not os.path.exists(nameNewLayer):
newLayer = QgsVectorFileWriter.writeAsVectorFormat(layer, nameNewLayer, 'utf-8',
QgsCoordinateReferenceSystem('EPSG: 4326'), driverName="ESRI Shapefile")
def cancell(self):
self.tableVertices.clearContents()
self.btnaddVertice.setEnabled(False)
self.btnendGeometry.setEnabled(False)
self.tableVertices.setEnabled(False)
self.mMapLayerComboBox.setEnabled(True)
self.btnubicarycentrar.setText("Ubicar y colectar")
self.tableVertices.setRowCount(0)
self.canvas.scene().removeItem(self.marker_polyline)
self.canvas.scene().removeItem(self.marker_polygon)
def closeEvent(self, event):
if self.canvas is not None:
self.canvas.scene().removeItem(self.marker)
self.closingPlugin.emit()
event.accept()
self.openProject = True
self.enableProject()
|
FFmpegPipeline.py
|
from modules.Pipeline import Pipeline # pylint: disable=import-error
from modules.PipelineManager import PipelineManager # pylint: disable=import-error
from common.utils import logging # pylint: disable=import-error
import string
import shlex
import subprocess
import time
import copy
from threading import Thread
import shutil
import uuid
logger = logging.get_logger('FFmpegPipeline', is_static=True)
if shutil.which('ffmpeg') is None:
raise Exception("ffmpeg not installed")
class FFmpegPipeline(Pipeline):
def __init__(self, id, config, models, request):
self.config = config
self.models = models
self.template = config['template']
self.id = id
self._process = None
self.start_time = None
self.stop_time = None
self._ffmpeg_launch_string = None
self.request = request
self.state = "QUEUED"
self.fps = None
def stop(self):
if self._process:
self.state = "ABORTED"
self._process.kill()
logger.debug("Setting Pipeline {id} State to ABORTED".format(id=self.id))
PipelineManager.pipeline_finished()
        if self.state == "QUEUED":
PipelineManager.remove_from_queue(self.id)
self.state = "ABORTED"
logger.debug("Setting Pipeline {id} State to ABORTED and removing from the queue".format(id=self.id))
def params(self):
request = copy.deepcopy(self.request)
del(request["models"])
params_obj = {
"id": self.id,
"request": request,
"type": self.config["type"],
"launch_command": self._ffmpeg_launch_string
}
return params_obj
def status(self):
logger.debug("Called Status")
if self.stop_time is not None:
elapsed_time = self.stop_time - self.start_time
elif self.start_time is not None:
elapsed_time = time.time() - self.start_time
else:
elapsed_time = None
status_obj = {
"id": self.id,
"state": self.state,
"avg_fps": self.fps,
"start_time": self.start_time,
"elapsed_time": elapsed_time
}
return status_obj
def _spawn(self,args):
self.start_time = time.time()
self._process=subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True)
self.state = "RUNNING"
self._process.poll()
        # Read ffmpeg's progress output from stderr until the process exits,
        # extracting the value between 'fps=' and 'q=' as the current frame rate.
        while self._process.returncode is None:
            next_line = self._process.stderr.readline()
            fps_idx = next_line.rfind('fps=')
            q_idx = next_line.rfind('q=')
            if fps_idx != -1 and q_idx != -1:
                self.fps = int(float(next_line[fps_idx+4:q_idx].strip()))
            self._process.poll()
self.stop_time = time.time()
if self.state != "ABORTED":
if self._process.returncode == 0:
self.state = "COMPLETED"
else:
self.state = "ERROR"
PipelineManager.pipeline_finished()
self._process = None
def _add_tags(self, iemetadata_args):
if "tags" in self.request:
try:
for key in self.request["tags"]:
iemetadata_args.append("-custom_tag")
iemetadata_args.append("%s:%s," % (key, self.request["tags"][key]))
if len(iemetadata_args):
# remove final comma
iemetadata_args[-1] = iemetadata_args[-1][:-1]
except Exception:
logger.error("Error adding tags")
def _add_default_parameters(self):
request_parameters = self.request.get("parameters", {})
pipeline_parameters = self.config.get("parameters", {})
for key in pipeline_parameters:
if (not key in request_parameters) and ("default" in pipeline_parameters[key]):
request_parameters[key] = pipeline_parameters[key]["default"]
self.request["parameters"] = request_parameters
def start(self):
logger.debug("Starting Pipeline {id}".format(id=self.id))
self.request["models"] = self.models
self._add_default_parameters()
self._ffmpeg_launch_string = string.Formatter().vformat(self.template, [], self.request)
args = ['ffmpeg']
args.extend(shlex.split(self._ffmpeg_launch_string))
iemetadata_args = ["-f", "iemetadata", "-source_url", self.request["source"]["uri"]]
self._add_tags(iemetadata_args)
if 'destination' in self.request:
if self.request['destination']['type'] == "kafka":
for item in self.request['destination']['hosts']:
iemetadata_args.append("kafka://"+item+"/"+self.request["destination"]["topic"])
elif self.request['destination']['type'] == "file":
iemetadata_args.append(self.request['destination']['uri'])
else:
iemetadata_args.append("file:///tmp/tmp"+str(uuid.uuid4().hex)+".json")
args.extend(iemetadata_args)
logger.debug(args)
thread = Thread(target=self._spawn, args=[args])
thread.start()
|
log.py
|
import json
import sys
import time
from pathlib2 import Path
from logging import LogRecord, getLogger, basicConfig, getLevelName, INFO, WARNING, Formatter, makeLogRecord, warning
from logging.handlers import BufferingHandler
from threading import Thread, Event
from six.moves.queue import Queue
from ...backend_api.services import events
from ...backend_api.session.session import MaxRequestSizeError
from ...config import config
buffer_capacity = config.get('log.task_log_buffer_capacity', 100)
class TaskHandler(BufferingHandler):
__flush_max_history_seconds = 30.
__wait_for_flush_timeout = 10.
__max_event_size = 1024 * 1024
__once = False
__offline_filename = 'log.jsonl'
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def __init__(self, task, capacity=buffer_capacity):
super(TaskHandler, self).__init__(capacity)
self.task_id = task.id
self.session = task.session
self.last_timestamp = 0
self.counter = 1
self._last_event = None
self._exit_event = None
self._queue = None
self._thread = None
self._pending = 0
self._offline_log_filename = None
if task.is_offline():
offline_folder = Path(task.get_offline_mode_folder())
offline_folder.mkdir(parents=True, exist_ok=True)
self._offline_log_filename = offline_folder / self.__offline_filename
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies (a minimal example
        of such an override is sketched at the end of this file).
"""
if self._task_id is None:
return False
        # if we need to add handlers to the base_logger,
        # it will not automatically create a stream handler when first used, so we must configure it manually.
if not TaskHandler.__once:
base_logger = getLogger()
if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler):
if record.name != 'console' and not record.name.startswith('trains.'):
base_logger.removeHandler(self)
basicConfig()
base_logger.addHandler(self)
TaskHandler.__once = True
else:
TaskHandler.__once = True
# if we passed the max buffer
if len(self.buffer) >= self.capacity:
return True
# if the first entry in the log was too long ago.
# noinspection PyBroadException
try:
if len(self.buffer) and (time.time() - self.buffer[0].created) > self.__flush_max_history_seconds:
return True
except Exception:
pass
return False
def _record_to_event(self, record):
# type: (LogRecord) -> events.TaskLogEvent
if self._task_id is None:
return None
timestamp = int(record.created * 1000)
if timestamp == self.last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self.last_timestamp = timestamp
self.counter = 1
# ignore backspaces (they are often used)
full_msg = record.getMessage().replace('\x08', '')
return_events = []
while full_msg:
msg = full_msg[:self.__max_event_size]
full_msg = full_msg[self.__max_event_size:]
# unite all records in a single second
if self._last_event and timestamp - self._last_event.timestamp < 1000 and \
len(self._last_event.msg) + len(msg) < self.__max_event_size and \
record.levelname.lower() == str(self._last_event.level):
# ignore backspaces (they are often used)
self._last_event.msg += '\n' + msg
continue
# if we have a previous event and it timed out, return it.
new_event = events.TaskLogEvent(
task=self.task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self.session.worker,
msg=msg
)
if self._last_event:
return_events.append(self._last_event)
self._last_event = new_event
return return_events
def flush(self):
if self._task_id is None:
return
if not self.buffer:
return
buffer = None
self.acquire()
if self.buffer:
buffer = self.buffer
self.buffer = []
self.release()
if not buffer:
return
# noinspection PyBroadException
try:
record_events = [r for record in buffer for r in self._record_to_event(record)] + [self._last_event]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
except Exception:
self.__log_stderr("WARNING: trains.log - Failed logging task to backend ({:d} lines)".format(len(buffer)))
batch_requests = None
if batch_requests:
self._pending += 1
self._add_to_queue(batch_requests)
def _create_thread_queue(self):
if self._queue:
return
self._queue = Queue()
self._exit_event = Event()
self._exit_event.clear()
# multiple workers could be supported as well
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def _add_to_queue(self, request):
self._create_thread_queue()
self._queue.put(request)
def close(self, wait=False):
# self.__log_stderr('Closing {} wait={}'.format(os.getpid(), wait))
# flush pending logs
if not self._task_id:
return
# avoid deadlocks just skip the lock, we are shutting down anyway
self.lock = None
self.flush()
# shut down the TaskHandler, from this point onwards. No events will be logged
_thread = self._thread
self._thread = None
if self._queue:
self._exit_event.set()
self._queue.put(None)
self._task_id = None
if wait and _thread:
# noinspection PyBroadException
try:
timeout = 1. if self._queue.empty() else self.__wait_for_flush_timeout
_thread.join(timeout=timeout)
if not self._queue.empty():
self.__log_stderr('Flush timeout {}s exceeded, dropping last {} lines'.format(
timeout, self._queue.qsize()))
# self.__log_stderr('Closing {} wait done'.format(os.getpid()))
except Exception:
pass
# call super and remove the handler
super(TaskHandler, self).close()
def _send_events(self, a_request):
try:
self._pending -= 1
if self._offline_log_filename:
with open(self._offline_log_filename.as_posix(), 'at') as f:
f.write(json.dumps([b.to_dict() for b in a_request.requests]) + '\n')
return
# if self._thread is None:
# self.__log_stderr('Task.close() flushing remaining logs ({})'.format(self._pending))
res = self.session.send(a_request)
if res and not res.ok():
self.__log_stderr("failed logging task to backend ({:d} lines, {})".format(
len(a_request.requests), str(res.meta)), level=WARNING)
except MaxRequestSizeError:
self.__log_stderr("failed logging task to backend ({:d} lines) log size exceeded limit".format(
len(a_request.requests)), level=WARNING)
except Exception as ex:
self.__log_stderr("Retrying, failed logging task to backend ({:d} lines): {}".format(
len(a_request.requests), ex))
# we should push ourselves back into the thread pool
if self._queue:
self._pending += 1
self._queue.put(a_request)
def _daemon(self):
# multiple daemons are supported
leave = self._exit_event.wait(0)
request = True
while not leave or request:
# pull from queue
request = None
if self._queue:
# noinspection PyBroadException
try:
request = self._queue.get(block=not leave)
except Exception:
pass
if request:
self._send_events(request)
leave = self._exit_event.wait(0)
# self.__log_stderr('leaving {}'.format(os.getpid()))
@staticmethod
def __log_stderr(msg, level=INFO):
# output directly to stderr, make sure we do not catch it.
write = sys.stderr._original_write if hasattr(sys.stderr, '_original_write') else sys.stderr.write
write('{asctime} - {name} - {levelname} - {message}\n'.format(
asctime=Formatter().formatTime(makeLogRecord({})),
name='trains.log', levelname=getLevelName(level), message=msg))
@classmethod
def report_offline_session(cls, task, folder):
filename = Path(folder) / cls.__offline_filename
if not filename.is_file():
return False
with open(filename, 'rt') as f:
i = 0
while True:
try:
line = f.readline()
if not line:
break
list_requests = json.loads(line)
for r in list_requests:
r.pop('task', None)
i += 1
except StopIteration:
break
except Exception as ex:
warning('Failed reporting log, line {} [{}]'.format(i, ex))
batch_requests = events.AddBatchRequest(
requests=[events.TaskLogEvent(task=task.id, **r) for r in list_requests])
res = task.session.send(batch_requests)
if res and not res.ok():
warning("failed logging task to backend ({:d} lines, {})".format(
len(batch_requests.requests), str(res.meta)))
return True
|
serve.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import argparse
import json
import logging
import os
import platform
import signal
import socket
import sys
import threading
import time
import traceback
from six.moves import urllib
import uuid
from collections import defaultdict, OrderedDict
from itertools import chain, product
from multiprocessing import Process, Event
from localpaths import repo_root
from six.moves import reload_module
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
"are mapped to a loopback device in /etc/hosts. "
"See https://github.com/web-platform-tests/wpt#running-the-tests "
"for instructions.")
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
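# Illustrative example (not part of the original source): given the path
# replacement rules used by the wrapper handlers below, a call such as
#   replace_end("test.window.html", ".window.html", ".window.js")
# returns "test.window.js"; the assert fires if `s` does not actually end
# with `old`.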
def domains_are_distinct(a, b):
a_parts = a.split(".")
b_parts = b.split(".")
min_length = min(len(a_parts), len(b_parts))
slice_index = -1 * min_length
return a_parts[slice_index:] != b_parts[slice_index:]
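# Illustrative example (not part of the original source): only the trailing
# labels up to the shorter domain's length are compared, so
#   domains_are_distinct("web-platform.test", "not-web-platform.test")  -> True
#   domains_are_distinct("a.example.test", "example.test")              -> False
# because "a.example.test" is a subdomain of "example.test".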
class WrapperHandler(object):
__meta__ = abc.ABCMeta
headers = []
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
for header_name, header_value in self.headers:
response.headers.set(header_name, header_value)
self.check_exposure(request)
path = self._get_path(request.url_parts.path, True)
query = request.url_parts.query
if query:
query = "?" + query
meta = "\n".join(self._get_meta(request))
script = "\n".join(self._get_script(request))
response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
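# Illustrative example (not part of the original source): with WorkersHandler's
# path_replace entry (".any.worker.html", ".any.js", ".any.worker.js"),
# _get_path("/x/test.any.worker.html", True) returns "/x/test.any.worker.js"
# (the resource the wrapper loads), while resource_path=False returns
# "/x/test.any.js" (the file on disk).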
def _get_metadata(self, request):
"""Get an iterator over script metadata based on // META comments in the
associated js file.
:param request: The Request being processed.
"""
path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
try:
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
yield key, value
except IOError:
raise HTTPException(404)
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
def _get_script(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._script_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
@abc.abstractmethod
def check_exposure(self, request):
# Raise an exception if this handler shouldn't be exposed after all.
pass
class HtmlWrapperHandler(WrapperHandler):
global_type = None
headers = [('Content-Type', 'text/html')]
def check_exposure(self, request):
if self.global_type:
globals = b""
for (key, value) in self._get_metadata(request):
if key == b"global":
globals = value
break
if self.global_type not in parse_variants(globals):
raise HTTPException(404, "This test cannot be loaded in %s mode" %
self.global_type)
def _meta_replacement(self, key, value):
if key == b"timeout":
if value == b"long":
return '<meta name="timeout" content="long">'
if key == b"title":
value = value.decode('utf-8').replace("&", "&amp;").replace("<", "&lt;")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
if key == b"script":
attribute = value.decode('utf-8').replace("&", "&amp;").replace('"', "&quot;")
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
global_type = b"dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
global_type = b"window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
global_type = b"sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
global_type = b"serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class AnyWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""
def _meta_replacement(self, key, value):
return None
def _script_replacement(self, key, value):
if key == b"script":
attribute = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
if key == b"title":
value = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404))]
self.extra = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.extra
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_handler(self, method, route, handler):
self.extra.append((str(method), str(route), handler))
def add_static(self, path, format_args, content_type, route, headers=None):
if headers is None:
headers = {}
handler = handlers.StaticHandler(path, format_args, content_type, **headers)
self.add_handler("GET", str(route), handler)
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.sharedworker.html", SharedWorkersHandler),
("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
("GET", "*.any.worker.js", AnyWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
"%s%s" % (url_base if url_base != "/" else "", suffix),
handler_cls(base_path=path, url_base=url_base)))
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def build_routes(aliases):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
logger.error("\"url-path\" value must start with '/'.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder.get_routes()
class ServerProc(object):
def __init__(self, scheme=None):
self.proc = None
self.daemon = None
self.stop = Event()
self.scheme = scheme
def start(self, init_func, host, port, paths, routes, bind_address, config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_address,
config),
name='%s on port %s' % (self.scheme, port),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_address,
config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_address, config, **kwargs)
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(config):
paths = config.paths
bind_address = config.bind_address
aliases = config.aliases
host = config.server_host
port = get_port()
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, build_routes(aliases),
bind_address, config)
url = "http://{}:{}/".format(host, port)
connected = False
for i in range(10):
try:
urllib.request.urlopen(url)
connected = True
break
except urllib.error.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server "
"on {}. {}".format(url, EDIT_HOSTS_HELP))
sys.exit(1)
for domain in config.domains_set:
if domain == host:
continue
try:
urllib.request.urlopen("http://%s:%d/" % (domain, port))
except Exception:
logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP))
sys.exit(1)
wrapper.wait()
def make_hosts_file(config, host):
rv = []
for domain in config.domains_set:
rv.append("%s\t%s\n" % (host, domain))
# Windows interprets the IP address 0.0.0.0 as non-existent, making it an
# appropriate alias for non-existent hosts. However, UNIX-like systems
# interpret the same address to mean any IP address, which is inappropriate
# for this context. These systems do not reserve any value for this
# purpose, so the unavailability of the domains must be taken for granted.
#
# https://github.com/web-platform-tests/wpt/issues/10560
if platform.uname()[0] == "Windows":
for not_domain in config.not_domains_set:
rv.append("0.0.0.0\t%s\n" % not_domain)
return "".join(rv)
def start_servers(host, ports, paths, routes, bind_address, config, **kwargs):
servers = defaultdict(list)
for scheme, ports in ports.items():
assert len(ports) == {"http": 2}.get(scheme, 1)
# If trying to start HTTP/2.0 server, check compatibility
if scheme == 'http2' and not http2_compatible():
logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
'Requires Python 2.7.10+ (< 3.0) and OpenSSL 1.0.2+')
continue
for port in ports:
if port is None:
continue
init_func = {"http": start_http_server,
"https": start_https_server,
"http2": start_http2_server,
"ws": start_ws_server,
"wss": start_wss_server}[scheme]
server_proc = ServerProc(scheme=scheme)
server_proc.start(init_func, host, port, paths, routes, bind_address,
config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
def start_http2_server(host, port, paths, routes, bind_address, config, **kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
handler_cls=wptserve.Http2WebTestRequestHandler,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"),
http2=True)
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root]
if ssl_config is not None:
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"]]
if (bind_address):
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def release_mozlog_lock():
try:
from mozlog.structuredlog import StructuredLogger
try:
StructuredLogger._lock.release()
except threading.ThreadError:
pass
except ImportError:
pass
def start_ws_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
ssl_config=None)
def start_wss_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
config.ssl_config)
def start(config, routes, **kwargs):
host = config["server_host"]
ports = config.ports
paths = config.paths
bind_address = config["bind_address"]
logger.debug("Using ports: %r" % ports)
servers = start_servers(host, ports, paths, routes, bind_address, config, **kwargs)
return servers
def iter_procs(servers):
for servers in servers.values():
for port, server in servers:
yield server.proc
def build_config(override_path=None, **kwargs):
rv = ConfigBuilder()
if kwargs.get("h2"):
rv._default["ports"]["http2"] = [9000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
def _make_subdomains_product(s, depth=2):
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
def _make_origin_policy_subdomains(limit):
return {u"op%d" % x for x in range(1,limit+1)}
_subdomains = {u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"}
_not_subdomains = {u"nonexistent"}
_subdomains = _make_subdomains_product(_subdomains)
# Origin policy subdomains need to not be reused by any other tests, since origin policies have
# origin-wide impacts like installing a CSP or Feature Policy that could interfere with features
# under test.
# See https://github.com/web-platform-tests/rfcs/pull/44.
_subdomains |= _make_origin_policy_subdomains(99)
_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
"""serve config
This subclasses wptserve.config.ConfigBuilder to add serve config options.
"""
_default = {
"browser_host": "web-platform.test",
"alternate_hosts": {
"alt": "not-web-platform.test"
},
"doc_root": repo_root,
"ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
"server_host": None,
"ports": {
"http": [8000, "auto"],
"https": [8443],
"ws": ["auto"],
"wss": ["auto"],
},
"check_subdomains": True,
"log_level": "debug",
"bind_address": True,
"ssl": {
"type": "pregenerated",
"encrypt_after_connect": False,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"password": "web-platform-tests",
"force_regenerate": False,
"duration": 30,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
"host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
},
"none": {}
},
"aliases": []
}
computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties
def __init__(self, *args, **kwargs):
if "subdomains" not in kwargs:
kwargs["subdomains"] = _subdomains
if "not_subdomains" not in kwargs:
kwargs["not_subdomains"] = _not_subdomains
super(ConfigBuilder, self).__init__(
*args,
**kwargs
)
with self as c:
browser_host = c.get("browser_host")
alternate_host = c.get("alternate_hosts", {}).get("alt")
if not domains_are_distinct(browser_host, alternate_host):
raise ValueError(
"Alternate host must be distinct from browser host"
)
def _get_ws_doc_root(self, data):
if data["ws_doc_root"] is not None:
return data["ws_doc_root"]
else:
return os.path.join(data["doc_root"], "websockets", "handlers")
def ws_doc_root(self, v):
self._ws_doc_root = v
ws_doc_root = property(None, ws_doc_root)
def _get_paths(self, data):
rv = super(ConfigBuilder, self)._get_paths(data)
rv["ws_doc_root"] = data["ws_doc_root"]
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
parser.add_argument("--alias_file", action="store", dest="alias_file",
help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
parser.add_argument("--h2", action="store_true", dest="h2",
help="Flag for enabling the HTTP/2.0 server")
parser.set_defaults(h2=False)
return parser
def run(**kwargs):
received_signal = threading.Event()
with build_config(os.path.join(repo_root, "config.json"),
**kwargs) as config:
global logger
logger = config.logger
set_logger(logger)
# Configure the root logger to cover third-party libraries.
logging.getLogger().setLevel(config.log_level)
def handle_signal(signum, frame):
logger.debug("Received signal %s. Shutting down.", signum)
received_signal.set()
bind_address = config["bind_address"]
if kwargs.get("alias_file"):
with open(kwargs["alias_file"], 'r') as alias_file:
for line in alias_file:
alias, doc_root = [x.strip() for x in line.split(',')]
config["aliases"].append({
'url-path': alias,
'local-dir': doc_root,
})
if config["check_subdomains"]:
check_subdomains(config)
stash_address = None
if bind_address:
stash_address = (config.server_host, get_port(""))
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(config, build_routes(config["aliases"]), **kwargs)
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
while all(item.is_alive() for item in iter_procs(servers)) and not received_signal.is_set():
for item in iter_procs(servers):
item.join(1)
exited = [item for item in iter_procs(servers) if not item.is_alive()]
subject = "subprocess" if len(exited) == 1 else "subprocesses"
logger.info("%s %s exited:" % (len(exited), subject))
for item in iter_procs(servers):
logger.info("Status of %s:\t%s" % (item.name, "running" if item.is_alive() else "not running"))
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
usage.py
|
import uuid
import time
import hashlib
import os
import getpass
import json
import logging
import socket
import sys
import platform
from parsl.multiprocessing import ForkProcess
from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
def async_process(fn):
""" Decorator function to launch a function as a separate process """
def run(*args, **kwargs):
proc = ForkProcess(target=fn, args=args, kwargs=kwargs, name="Usage-Tracking")
proc.start()
return proc
return run
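# Illustrative usage (not part of the original source): a function decorated
# with @async_process returns immediately with the ForkProcess handle, e.g.
#
#   @async_process
#   def ping(host):
#       print("pinging", host)
#
#   proc = ping("example.org")   # runs in a separate process
#   proc.join()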
@async_process
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
"""Send UDP messages to usage tracker asynchronously
This multiprocessing based messenger was written to overcome the limitations
of signalling/terminating a thread that is blocked on a system call. This
messenger is created as a separate process that sends the given message
once and then exits.
Args:
- domain_name (str) : Domain name string
- UDP_IP (str) : IP address YYY.YYY.YYY.YYY
- UDP_PORT (int) : UDP port to send out on
- sock_timeout (int) : Socket timeout
- message (str) : Message to send to the usage tracker
"""
try:
if message is None:
raise ValueError("message was none")
encoded_message = bytes(message, "utf-8")
if encoded_message is None:
raise ValueError("utf-8 encoding of message failed")
if domain_name:
try:
UDP_IP = socket.gethostbyname(domain_name)
except Exception:
# (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
pass
if UDP_IP is None:
raise Exception("UDP_IP is None")
if UDP_PORT is None:
raise Exception("UDP_PORT is None")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.settimeout(sock_timeout)
sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
sock.close()
except socket.timeout:
logger.debug("Failed to send usage tracking data: socket timeout")
except OSError as e:
logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
except Exception as e:
logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
class UsageTracker (object):
"""Anonymized Usage Tracking for Parsl.
Client for this is here : https://github.com/Parsl/parsl_tracking
This issue captures the discussion that went into functionality
implemented here : https://github.com/Parsl/parsl/issues/34
"""
def __init__(self, dfk, ip='52.3.111.203', port=50077,
domain_name='tracking.parsl-project.org'):
"""Initialize usage tracking unless the user has opted-out.
We will try to resolve the hostname specified in kwarg:domain_name
and if that fails attempt to use the kwarg:ip. Determining the
IP and sending message is threaded to avoid slowing down DFK
initialization.
Tracks usage stats by inspecting the internal state of the dfk.
Args:
- dfk (DFK object) : Data Flow Kernel object
KWargs:
- ip (string) : IP address
- port (int) : Port number, Default:50077
- domain_name (string) : Domain name, will override IP
Default: tracking.parsl-project.org
"""
self.domain_name = domain_name
self.ip = ip
# The sock timeout will only apply to UDP send and not domain resolution
self.sock_timeout = 5
self.UDP_PORT = port
self.UDP_IP = None
self.procs = []
self.dfk = dfk
self.config = self.dfk.config
self.uuid = str(uuid.uuid4())
self.parsl_version = PARSL_VERSION
self.python_version = "{}.{}.{}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro)
self.tracking_enabled = self.check_tracking_enabled()
logger.debug("Tracking status: {}".format(self.tracking_enabled))
self.initialized = False # Once first message is sent this will be True
def check_tracking_enabled(self):
"""By default tracking is enabled.
Tracking is disabled if :
1. config["globals"]["usageTracking"] is set to False (Bool)
2. Environment variable PARSL_TRACKING is set to false (case insensitive)
"""
track = True # By default we track usage
if not self.config.usage_tracking:
track = False
envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
if envvar == "false":
track = False
return track
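# Illustrative example (not part of the original source): tracking can be
# switched off either in the Parsl config (usage_tracking=False) or from the
# environment before the DFK is created, e.g.
#   export PARSL_TRACKING=false
# (the check is case insensitive, so "False" or "FALSE" also work).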
def construct_start_message(self):
"""Collect preliminary run info at the start of the DFK.
Returns :
- Message dict dumped as json string, ready for UDP
"""
uname = getpass.getuser().encode('latin1')
hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
hname = socket.gethostname().encode('latin1')
hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
message = {'uuid': self.uuid,
'uname': hashed_username,
'hname': hashed_hostname,
'test': False, # this field previously indicated if parsl
# was being run in test mode, and is
# retained for protocol compatibility
'parsl_v': self.parsl_version,
'python_v': self.python_version,
'os': platform.system(),
'os_v': platform.release(),
'start': time.time()}
return json.dumps(message)
def construct_end_message(self):
"""Collect the final run information at the time of DFK cleanup.
Returns:
- Message dict dumped as json string, ready for UDP
"""
app_count = self.dfk.task_count
site_count = len([x for x in self.dfk.config.executors if x.managed])
app_fails = self.dfk.tasks_failed_count + self.dfk.tasks_dep_fail_count
message = {'uuid': self.uuid,
'end': time.time(),
't_apps': app_count,
'sites': site_count,
'c_time': None,
'failed': app_fails,
'test': False, # see comment in construct_start_message
}
return json.dumps(message)
def send_UDP_message(self, message):
"""Send UDP message."""
x = 0
if self.tracking_enabled:
try:
proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
self.procs.append(proc)
except Exception as e:
logger.debug("Usage tracking failed: {}".format(e))
else:
x = -1
return x
def send_message(self):
"""Send message over UDP.
If tracking is disabled, the message is not sent and send_UDP_message returns -1.
Returns:
time_taken (float) : seconds spent constructing and dispatching the message
"""
start = time.time()
message = None
if not self.initialized:
message = self.construct_start_message()
self.initialized = True
else:
message = self.construct_end_message()
self.send_UDP_message(message)
end = time.time()
return end - start
def __del__(self):
return self.close()
def close(self):
"""We terminate (SIGTERM) the processes added to the self.procs list """
for proc in self.procs:
proc.terminate()
|
registry.py
|
import logging
import threading
import time
from typing import List
from brownie import Contract, chain, web3
from joblib import Parallel, delayed
from web3._utils.abi import filter_by_name
from web3._utils.events import construct_event_topic_set
from yearn.events import create_filter, decode_logs, get_logs_asap
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
from yearn.utils import Singleton, contract_creation_block, contract
from yearn.v2.vaults import Vault
from yearn.networks import Network
from yearn.exceptions import UnsupportedNetwork
from yearn.decorators import sentry_catch_all, wait_or_exit_before, wait_or_exit_after
logger = logging.getLogger(__name__)
class Registry(metaclass=Singleton):
def __init__(self, watch_events_forever=True):
self.releases = {} # api_version => template
self._vaults = {} # address -> Vault
self._experiments = {} # address => Vault
self.governance = None
self.tags = {}
self._watch_events_forever = watch_events_forever
self.registries = self.load_registry()
# load registry state in the background
self._done = threading.Event()
self._has_exception = False
self._thread = threading.Thread(target=self.watch_events, daemon=True)
self._thread.start()
def load_registry(self):
if chain.id == Network.Mainnet:
return self.load_from_ens()
elif chain.id == Network.Fantom:
return [contract('0x727fe1759430df13655ddb0731dE0D0FDE929b04')]
elif chain.id == Network.Arbitrum:
return [contract('0x3199437193625DCcD6F9C9e98BDf93582200Eb1f')]
else:
raise UnsupportedNetwork('yearn v2 is not available on this network')
def load_from_ens(self):
# track older registries to pull experiments
resolver = contract('0x4976fb03C32e5B8cfe2b6cCB31c09Ba78EBaBa41')
topics = construct_event_topic_set(
filter_by_name('AddressChanged', resolver.abi)[0],
web3.codec,
{'node': web3.ens.namehash('v2.registry.ychad.eth')},
)
events = decode_logs(get_logs_asap(str(resolver), topics))
logger.info('loaded %d registry versions', len(events))
return [Contract(event['newAddress']) for event in events]
@property
@wait_or_exit_before
def vaults(self) -> List[Vault]:
return list(self._vaults.values())
@property
@wait_or_exit_before
def experiments(self) -> List[Vault]:
return list(self._experiments.values())
@wait_or_exit_before
def __repr__(self) -> str:
return f"<Registry chain={chain.id} releases={len(self.releases)} vaults={len(self.vaults)} experiments={len(self.experiments)}>"
@wait_or_exit_after
def load_vaults(self):
if not self._thread._started.is_set():
self._thread.start()
@sentry_catch_all
def watch_events(self):
start = time.time()
self.log_filter = create_filter([str(addr) for addr in self.registries])
while True:
logs = self.log_filter.get_new_entries()
self.process_events(decode_logs(logs))
if not self._done.is_set():
self._done.set()
logger.info("loaded v2 registry in %.3fs", time.time() - start)
if not self._watch_events_forever:
return
time.sleep(300)
def process_events(self, events):
for event in events:
logger.debug("%s %s %s", event.address, event.name, dict(event))
if event.name == "NewGovernance":
self.governance = event["governance"]
if event.name == "NewRelease":
self.releases[event["api_version"]] = contract(event["template"])
if event.name == "NewVault":
# experiment was endorsed
if event["vault"] in self._experiments:
vault = self._experiments.pop(event["vault"])
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("endorsed vault %s %s", vault.vault, vault.name)
# we already know this vault from another registry
elif event["vault"] not in self._vaults:
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("new vault %s %s", vault.vault, vault.name)
if event.name == "NewExperimentalVault":
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']} {event['vault'][:8]}"
self._experiments[event["vault"]] = vault
logger.debug("new experiment %s %s", vault.vault, vault.name)
if event.name == "VaultTagged":
self.tags[event["vault"]] = event["tag"]
def vault_from_event(self, event):
return Vault(
vault=Contract.from_abi("Vault", event["vault"], self.releases[event["api_version"]].abi),
token=event["token"],
api_version=event["api_version"],
registry=self,
watch_events_forever=self._watch_events_forever,
)
def load_strategies(self):
# stagger loading strategies to not run out of connections in the pool
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_strategies)() for vault in vaults)
def load_harvests(self):
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_harvests)() for vault in vaults)
def describe(self, block=None):
vaults = self.active_vaults_at(block)
results = Parallel(8, "threading")(delayed(vault.describe)(block=block) for vault in vaults)
return {vault.name: result for vault, result in zip(vaults, results)}
def total_value_at(self, block=None):
vaults = self.active_vaults_at(block)
prices = Parallel(8, "threading")(delayed(magic.get_price)(str(vault.token), block=block) for vault in vaults)
results = fetch_multicall(*[[vault.vault, "totalAssets"] for vault in vaults], block=block)
return {vault.name: assets * price / vault.scale for vault, assets, price in zip(vaults, results, prices)}
def active_vaults_at(self, block=None):
vaults = self.vaults + self.experiments
if block:
vaults = [vault for vault in vaults if contract_creation_block(str(vault.vault)) <= block]
# fixes edge case: a vault is not necessarily initialized on creation
activations = fetch_multicall(*[[vault.vault, 'activation'] for vault in vaults], block=block)
return [vault for vault, activation in zip(vaults, activations) if activation]
def wallets(self, block=None):
return set(vault.wallets(block) for vault in self.active_vaults_at(block))
|
raft_node.py
|
import rpyc
import threading
from random import randint
from rpyc.utils.server import ThreadedServer
from config_reader import ConfigReader
from node_dao import NodeDAO
import logging
import sys
import os
# States of Raft node
LEADER = "LEADER"
FOLLOWER = "FOLLOWER"
CANDIDATE = "CANDIDATE"
# AppendRPC return states
SUCCESS = "SUCCESS"
TERM_INCONSISTENCY = "TERM_INCONSISTENCY"
NEXT_INDEX_INCONSISTENCY = "NEXT_INDEX_INCONSISTENCY"
INVALID_COMMIT_INDEX = -2
# AppendRPC type
BLOG_REPLICATION = "BLOG_REPLICATION"
JOINT_CONFIGURATION = "JOINT_CONFIGURATION"
NEW_CONFIGURATION = "NEW_CONFIGURATION"
class RaftService(rpyc.Service):
config_reader = ConfigReader("../config/config.ini")
node_dao = NodeDAO()
# stable log => (index, term, value)
voted_for, term, stable_log, blog = node_dao.initialize_persistence_files(-1, -1, list(), list())
# Initializing commit_index based on blog
# If blog is empty, nothing committed
if not blog:
commit_index = -1
# Else, commit index is the last index of the blog
else:
commit_index = len(blog) - 1
# Server log setup
logger = logging.getLogger("raft_node")
log_handler = logging.FileHandler("../log/raft_node.log")
formatter = logging.Formatter("%(levelname)s %(message)s")
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
state = FOLLOWER
electionTimer = 0
heartBeatTimer = 0
deathBeatTimer = 0
server_id = int(config_reader.get_configuration("CurrentServer", "sid"))
id_ip_port = config_reader.get_server_parameters("Server" + str(server_id))
total_nodes = int(config_reader.get_total_nodes())
timeout_parameter = int(config_reader.get_election_timeout_period())
peers = config_reader.get_peers(server_id, total_nodes)
heartBeatInterval = int(config_reader.get_heartbeat_interval())
majority_criteria = int(config_reader.get_majority_criteria())
interrupt = False
leader_id = -1
should_i_die = False
am_i_getting_updated = False
# Duplicate Config information to smooth out config change
majority_criteria_old = int(config_reader.get_majority_criteria())
total_nodes_old = int(config_reader.get_total_nodes())
peers_old = config_reader.get_peers(server_id, total_nodes)
peers_to_remove = list()
@staticmethod
def check_majority(votes):
# During normal operation the criteria are the same values
# During config change they will be different values
if votes >= RaftService.majority_criteria and votes >= RaftService.majority_criteria_old:
return True
else:
return False
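# Illustrative example (not part of the original source): during a joint
# configuration change with majority_criteria=3 and majority_criteria_old=2,
# check_majority(3) is True (3 >= 3 and 3 >= 2) while check_majority(2) is
# False, because a quorum is required in both the old and the new cluster.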
def switch_to_joint_config(self, new_majority, new_total_nodes, new_peers):
RaftService.majority_criteria_old = RaftService.majority_criteria
RaftService.total_nodes_old = RaftService.total_nodes
RaftService.peers_old = RaftService.peers
RaftService.majority_criteria = new_majority
RaftService.total_nodes = new_total_nodes
RaftService.peers = new_peers
def switch_to_new_config(self):
# Assumes you are running in Joint config mode
RaftService.majority_criteria_old = RaftService.majority_criteria
RaftService.total_nodes_old = RaftService.total_nodes
RaftService.peers_old = RaftService.peers
if RaftService.server_id != RaftService.leader_id:
reducted_peers = list()
for peer in RaftService.peers:
flag = True
for remove_id in RaftService.peers_to_remove:
if peer[0] == remove_id:
flag = False
if flag:
reducted_peers.append(peer)
RaftService.peers_old = reducted_peers
RaftService.peers = reducted_peers
RaftService.peers_to_remove = list()
if RaftService.server_id != RaftService.leader_id:
#Update and persist the new configuration
self.config_reader.update_config_file(RaftService.server_id, RaftService.total_nodes, RaftService.majority_criteria, RaftService.peers)
if RaftService.should_i_die and RaftService.server_id != RaftService.leader_id:
RaftService.logger.info("Stepping down as I am not part of new config")
#os._exit(0)
RaftService.start_deathbeat_timer()
def convert_to_string(self, log_entry):
config_change_list = log_entry[1]
mode = log_entry[0]
string_of_config = mode
for item_to_replicate in config_change_list:
command = item_to_replicate[0]
string_of_config = string_of_config + " " +str(command) + " "+str(item_to_replicate[1])
if command == "ADD":
string_of_config = string_of_config + " " + str(item_to_replicate[2]) + " "+ str(item_to_replicate[3])
return string_of_config
def on_connect(self):
# code that runs when a new connection is created
pass
def on_disconnect(self):
# code that runs when a connection closes
pass
@staticmethod
def start_election_timer():
# Election timeout to be a random value between T and 2T
timeout = randint(RaftService.timeout_parameter, 2 * RaftService.timeout_parameter)
RaftService.electionTimer = threading.Timer(timeout, RaftService.start_election)
RaftService.electionTimer.start()
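# Illustrative example (not part of the original source): with
# timeout_parameter T = 5, the election timeout is drawn uniformly from
# [5, 10] via randint(5, 2 * 5), so followers time out at different moments
# and split votes become less likely.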
@staticmethod
def start_election():
RaftService.logger.info("Starting election for server %s" % (RaftService.server_id))
RaftService.state = CANDIDATE
RaftService.term = RaftService.term + 1
RaftService.voted_for = RaftService.server_id
RaftService.node_dao.persist_vote_and_term(RaftService.voted_for, RaftService.term)
total_votes = RaftService.request_votes()
# Check Majority
if total_votes == -1:
RaftService.logger.warning("Voting was interrupted by external factor")
RaftService.state = FOLLOWER
RaftService.reset_and_start_timer()
elif RaftService.check_majority(total_votes):
RaftService.leader_id = RaftService.server_id
RaftService.state = LEADER
# Send HeartBeat immediately and then setup regular heartbeats
RaftService.start_heartbeat_timer()
RaftService.logger.info("Successfully elected New Leader %s " % RaftService.leader_id)
else:
# Step Down
RaftService.state = FOLLOWER
RaftService.reset_and_start_timer()
@staticmethod
def start_heartbeat_timer():
# Once I'm the LEADER, start sending heartbeat messages to peers
RaftService.heartBeatTimer = threading.Timer(RaftService.heartBeatInterval, RaftService.trigger_next_heartbeat)
RaftService.heartBeatTimer.start()
@staticmethod
def start_deathbeat_timer():
RaftService.deathBeatTimer = threading.Timer(3, RaftService.trigger_death)
RaftService.deathBeatTimer.start()
@staticmethod
def trigger_next_heartbeat():
if RaftService.state == LEADER:
threading.Thread(target=RaftService.start_heartbeat_timer).start()
RaftService.send_heartbeat()
@staticmethod
def trigger_death():
if RaftService.should_i_die:
os._exit(0)
@staticmethod
def send_heartbeat():
# Connect to peers and send heartbeats
for peer in RaftService.peers:
try:
connection = rpyc.connect(peer[1], peer[2], config={"allow_public_attrs": True})
connection.root.append_entriesRPC(leaders_term=RaftService.term,
leaders_id=RaftService.server_id,
previous_log_index=None,
previous_log_term=None,
entries=None,
commit_index=RaftService.commit_index)
except Exception as details:
#// RaftService.logger.warning("send_heartbeat: Unable to connect to server %d" % peer[0])
pass
@staticmethod
def reset_and_start_timer():
RaftService.electionTimer.cancel()
RaftService.start_election_timer()
@staticmethod
def request_votes():
total_votes = 0
last_index, last_term = RaftService.get_last_log_index_and_term()
# TODO Run this concurrently
# Suggestion: Create a separate RPC call to handle response. This RPC only requests for vote.
# For now we assume that the network wont fail
for peer in RaftService.peers:
try:
vote = False
connection = rpyc.connect(peer[1], peer[2], config={"allow_public_attrs": True})
vote = connection.root.requestRPC(term=RaftService.term,
candidate_id=RaftService.server_id,
last_log_index=last_index,
last_log_term=last_term)
RaftService.logger.info("Vote received: " + str(vote))
if vote:
RaftService.logger.info("Received vote from server %d for leader election, term %d"
% (peer[0], RaftService.term))
total_votes = total_votes + 1
except Exception as details:
#// RaftService.logger.warning("request_votes: Unable to connect to server %d" % peer[0]) # +1 to account for self-vote
pass
return total_votes + 1
def is_part_of_cluster(self, candidate_id):
part_of_cluster = False
for peer in RaftService.peers:
if peer[0] == candidate_id:
part_of_cluster = True
break
return part_of_cluster
def exposed_requestRPC(self, term, candidate_id, last_log_index, last_log_term):
my_vote = False
if not self.is_part_of_cluster(candidate_id):
return my_vote
RaftService.logger.info("Received requestRPC: candidate term: %d, my_term: %d" % (term, RaftService.term))
if RaftService.term == term:
# Check if I had voted to this candidate previously for this term. If YES, re-iterate my vote
if RaftService.voted_for == candidate_id:
my_vote = True
RaftService.node_dao.persist_vote_and_term(RaftService.voted_for, RaftService.term)
else:
RaftService.logger.info("Server %s has already vote this term (%s) to %s" % (
RaftService.server_id, RaftService.term, RaftService.voted_for))
elif term < RaftService.term:
RaftService.logger.info("Stale term of candidate %s" % candidate_id)
elif term > RaftService.term:
log_index, log_term = self.get_last_log_index_and_term()
RaftService.logger.info(
"In requestRPC: candidate_last_log_term: %d, my_last_log_term: %d, candidate_last_log_index: %d, my_last_log_index: %d" % (
last_log_term, log_term, last_log_index, log_index))
if last_log_term >= log_term and last_log_index >= log_index:
my_vote = True
RaftService.reset_and_start_timer()
RaftService.logger.info("Voting YES to candidate %s" % candidate_id)
# TODO Need Review on this
RaftService.term = term
RaftService.voted_for = candidate_id
RaftService.state = FOLLOWER
RaftService.node_dao.persist_vote_and_term(RaftService.voted_for, RaftService.term)
else:
RaftService.logger.warning("Something went wrong. Shouldn't print this...")
return my_vote
def exposed_config_changeRPC(self, list_of_config_changes, client_id):
new_config_change_success = False
RaftService.logger.info("Received Configuration Change Request from client %s" % client_id)
if RaftService.server_id != RaftService.leader_id:
try:
RaftService.logger.info("Redirecting the request to Leader %s" % RaftService.server_id)
(ip, port) = RaftService.config_reader.get_leaders_port_ip(RaftService.leader_id, RaftService.peers)
connection = rpyc.connect(ip, port, config={"allow_public_attrs": True})
new_config_change_success = connection.root.exposed_config_change_leaderRPC(list_of_config_changes, client_id)
except Exception as details:
RaftService.logger.info(details)
else:
entry = (JOINT_CONFIGURATION, list_of_config_changes)
### Apply Joint Configuration
self.run_config_change(entry)
joint_config_change_success = self.append_entries(entry, client_id)
if joint_config_change_success:
# Joint consensus is running. So start new config now
###Apply new configuration
self.run_config_change((NEW_CONFIGURATION,list_of_config_changes))
new_config_change_success = self.append_entries((NEW_CONFIGURATION,list_of_config_changes), client_id)
if new_config_change_success:
RaftService.trigger_next_heartbeat()
RaftService.logger.info("Successfully changed the configuration of the system.")
if RaftService.server_id == RaftService.leader_id:
reducted_peers = list()
for peer in RaftService.peers:
flag = True
for remove_id in RaftService.peers_to_remove:
if peer[0] == remove_id:
flag = False
if flag:
reducted_peers.append(peer)
RaftService.peers_old = reducted_peers
RaftService.peers = reducted_peers
RaftService.peers_to_remove = list()
if RaftService.server_id == RaftService.leader_id:
self.config_reader.update_config_file(RaftService.server_id, RaftService.total_nodes, RaftService.majority_criteria,RaftService.peers)
if RaftService.should_i_die and RaftService.server_id == RaftService.leader_id:
RaftService.logger.info("Stepping down as I am not part of new config")
#os._exit(0)
RaftService.start_deathbeat_timer()
else:
RaftService.logger.info("Couldn't change the configuration of system to new config.")
else:
RaftService.logger.info("Couldn't change the configuration of the system to joint config.")
return new_config_change_success
def exposed_config_change_leaderRPC(self, list_of_config_changes, client_id):
new_config_change_success = False
RaftService.logger.info("Received Configuration via Redirection from client %s" % client_id)
entry = (JOINT_CONFIGURATION, list_of_config_changes)
### Apply Joint Configuration
self.run_config_change(entry)
joint_config_change_success = self.append_entries((JOINT_CONFIGURATION,list_of_config_changes), client_id)
if joint_config_change_success:
# Joint consensus is running. So start new config now
###Apply new configuration
self.run_config_change((NEW_CONFIGURATION,list_of_config_changes))
new_config_change_success = self.append_entries((NEW_CONFIGURATION,list_of_config_changes), client_id)
if new_config_change_success:
RaftService.trigger_next_heartbeat()
RaftService.logger.info("Successfully changed the configuration of the system.")
if RaftService.server_id == RaftService.leader_id:
reducted_peers = list()
for peer in RaftService.peers:
flag = True
for remove_id in RaftService.peers_to_remove:
if peer[0] == remove_id:
flag = False
if flag:
reducted_peers.append(peer)
RaftService.peers_old = reducted_peers
RaftService.peers = reducted_peers
RaftService.peers_to_remove = list()
if RaftService.server_id == RaftService.leader_id:
self.config_reader.update_config_file(RaftService.server_id, RaftService.total_nodes, RaftService.majority_criteria,RaftService.peers)
if RaftService.should_i_die and RaftService.server_id == RaftService.leader_id:
RaftService.logger.info("Stepping down as I am not part of new config")
#os._exit(0)
RaftService.start_deathbeat_timer()
else:
RaftService.logger.info("Couldn't change the configuration of system to new config.")
else:
RaftService.logger.info("Couldn't change the configuration of the system to joint config.")
return new_config_change_success
def exposed_lookupRPC(self):
blogs = RaftService.blog
return blogs
def exposed_postRPC(self, blog, client_id):
return_value = False
RaftService.logger.info("Received Post from client %s" % client_id)
if RaftService.server_id != RaftService.leader_id:
try:
(ip, port) = RaftService.config_reader.get_leaders_port_ip(RaftService.leader_id, RaftService.peers)
connection = rpyc.connect(ip, port, config={"allow_public_attrs": True})
return_value = connection.root.exposed_post_leaderRPC(blog, client_id)
except Exception as details:
RaftService.logger.info(details)
else:
return_value = self.append_entries(blog, client_id)
return return_value
def exposed_post_leaderRPC(self, blog, client_id):
RaftService.logger.info("Received Post from client %s" % client_id)
return self.append_entries(blog, client_id)
def append_entries(self, item_to_replicate, client_id):
# This code is to be executed by the LEADER
# The driver of this method is Client or Followers forwarding client requests
# 1 Replicate the item_to_replicate
# (index, term, value, commit_status)
previous_log_index, previous_log_term = RaftService.get_last_log_index_and_term()
RaftService.logger.info("Prev Index %s Prev Term %s" % (previous_log_index, previous_log_term))
entry = (previous_log_index + 1, RaftService.term, item_to_replicate)
if not isinstance (item_to_replicate, basestring):
string_config = self.convert_to_string(item_to_replicate)
entry = (previous_log_index + 1, RaftService.term, string_config)
RaftService.stable_log.append(entry)
entry = (previous_log_index + 1, RaftService.term, item_to_replicate)
RaftService.node_dao.persist_log(RaftService.stable_log)
entries = list()
entries.append(entry)
# 2 Send RPCs and wait for majority
if RaftService.state == LEADER:
total_votes = self.replicate_log(entries, previous_log_index, previous_log_term) + 1
if RaftService.check_majority(total_votes):
RaftService.logger.info(
"Reached consensus to replicate %s, %s" % (previous_log_index + 1, RaftService.term))
self.update_state_machine(RaftService.commit_index + 1)
else:
RaftService.logger.info("Reached no majority")
else:
RaftService.logger.warning("I aint no leader. Somebody called me by accident!")
return True
def replicate_log(self, entries, prev_log_index, prev_log_term):
total_votes = 0
# TODO Redundant Code Ah Man!
for peer in RaftService.peers:
# TODO For now, as the dude doesnt fail, the entries are what client asks to replicate
# TODO Remove this dangerous guy at once!
# TODO Does it make sense to sleep for a while and try again network failure errors
previous_log_index = prev_log_index
previous_log_term = prev_log_term
while True:
try:
connection = rpyc.connect(peer[1], peer[2], config={"allow_public_attrs": True})
term, status, next_index = connection.root.append_entriesRPC(leaders_term=RaftService.term,
leaders_id=RaftService.server_id,
previous_log_index=previous_log_index,
previous_log_term=previous_log_term,
entries=entries,
commit_index=RaftService.commit_index)
if status == SUCCESS:
RaftService.logger.info("Received Success from %s" % peer[0])
total_votes = total_votes + 1
break
elif status == TERM_INCONSISTENCY or status == NEXT_INDEX_INCONSISTENCY:
RaftService.logger.info("Received term inconsistency from %s. Next index %s Term %s" % (
peer[0], term, next_index))
entries, previous_log_term = self.get_entries_from_index((next_index - 1))
previous_log_index = next_index - 1
else:
RaftService.logger.warning("Shouldn't have reached here. something is wrong")
except Exception as details:
#// RaftService.logger.warning("replicate_log: Unable to connect to server %d" % peer[0])
break
#TODO put the thread to sleep and try again. Cos we try again
return total_votes + 1
def get_entries_from_index(self, index):
entries = list()
tuple_ = RaftService.stable_log[index]
previous_log_term = tuple_[1]
for i in range(index + 1, len(RaftService.stable_log)):
entries.append(RaftService.stable_log[i])
return entries, previous_log_term
def run_config_change(self, log_entry):
config_change_list = log_entry[1]
mode = log_entry[0]
RaftService.logger.info("Running configuration change now -%s"%mode)
if JOINT_CONFIGURATION == mode:
new_total_nodes = RaftService.total_nodes
new_peers = RaftService.peers_old
for item_to_replicate in config_change_list:
command = item_to_replicate[0]
local_id = int(item_to_replicate[1])
if command == "ADD":
ip = item_to_replicate[2]
port = int(item_to_replicate[3])
new_peers.append((local_id, ip, port))
new_total_nodes = new_total_nodes + 1
elif command == "REMOVE":
if (local_id == RaftService.server_id):
RaftService.should_i_die = True
RaftService.peers_to_remove.append(local_id)
#new_peers = self.config_reader.get_new_peers_by_removing(local_id, new_peers)
new_total_nodes = new_total_nodes - 1
else:
print "Reached else condition."
new_majority_criteria = int(new_total_nodes / 2) + 1
self.switch_to_joint_config(new_majority_criteria, new_total_nodes, new_peers)
elif NEW_CONFIGURATION == mode:
self.switch_to_new_config()
else:
RaftService.logger.info("Wrong mode called for applying to state machine")
def get_new_config(self, config_change_list):
new_peers = RaftService.peers
new_total_nodes = RaftService.total_nodes
for item_to_replicate in config_change_list:
command = item_to_replicate[0]
id = item_to_replicate[1]
if command == "ADD":
ip = item_to_replicate[2]
port = item_to_replicate[3]
new_peers.append((id, ip, port))
new_total_nodes = new_total_nodes + 1
elif command == "REMOVE":
if (id == RaftService.server_id):
RaftService.should_i_die = True
new_peers = self.config_reader.get_new_peers_by_removing(id, new_peers)
new_total_nodes = new_total_nodes - 1
new_majority_criteria = int(new_total_nodes / 2) + 1
return new_majority_criteria, new_total_nodes, new_peers
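# Hedged worked example of the majority criterion used above (numbers are illustrative):
# with new_total_nodes = 5, new_majority_criteria = int(5 / 2) + 1 = 3, i.e. the leader's
# own vote plus two successful AppendEntries replies commit an entry; with 4 nodes the
# criterion is also 3.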
def exposed_append_entriesRPC(self,
leaders_term,
leaders_id,
previous_log_index,
previous_log_term,
entries,
commit_index):
#// RaftService.logger.info("In method append entries RPC %s"%entries)
# TODO Isn't this for heartbeat alone? Seems like overkill @SURAJ
# AppendRPC received, need to reset my election timer
RaftService.reset_and_start_timer()
# If my term is less than leader's, update my term
if leaders_term > RaftService.term:
# real LEADER sent me an appendRPC, may be I am an old leader who needs to be neutralized
if RaftService.state == LEADER:
RaftService.heartBeatTimer.cancel()
RaftService.state = FOLLOWER
RaftService.term = leaders_term
# If I am the CANDIDATE step down
if RaftService.state == CANDIDATE:
RaftService.state = FOLLOWER
if entries is not None: # Not a heartbeat, entries to append
RaftService.logger.info("Received appendRPC from %d" % leaders_id)
# Get my last log index and last log index term
my_prev_log_index, my_prev_log_entry_term = RaftService.get_last_log_index_and_term()
my_next_index = my_prev_log_index + 1
# Check if next index matches. If not, send Inconsistency error and next index of the Follower
if previous_log_index != my_prev_log_index:
my_next_index = my_next_index - 1
RaftService.logger.info("Reply to AppendRPC: Sending NEXT_INDEX_INCONSISTENCY to %d" % leaders_id)
return (RaftService.term, NEXT_INDEX_INCONSISTENCY, my_next_index)
# Check if previous log entry matches previous log term
# If not, send Term Inconsistency error and next index of the Follower
if previous_log_term != my_prev_log_entry_term:
my_next_index = my_next_index - 1
RaftService.logger.info("Reply to AppendRPC: Sending TERM_INCONSISTENCY to %d" % leaders_id)
return (RaftService.term, TERM_INCONSISTENCY, my_next_index)
# Log consistency check successful. Append entries to log, persist on disk, send SUCCESS
for entry in entries:
config_change = entry[2]
if not isinstance(config_change, basestring):
self.run_config_change(config_change)
string_config = self.convert_to_string(config_change)
new_entry = (entry[0],entry[1],string_config)
RaftService.stable_log.append(new_entry)
else:
RaftService.stable_log.append(entry)
my_next_index = my_next_index + 1
RaftService.node_dao.persist_log(RaftService.stable_log)
RaftService.logger.info("Log after appending ...")
self.print_stable_log()
RaftService.logger.info("Reply to AppendRPC: Sending SUCCESS to %d" % leaders_id)
return (RaftService.term, SUCCESS, my_next_index)
else:
if RaftService.leader_id != leaders_id:
RaftService.leader_id = leaders_id
#// RaftService.logger.info("Received HeartBeat from %d, my leader is %d" % (leaders_id, RaftService.leader_id))
if RaftService.commit_index < commit_index:
self.update_state_machine(commit_index)
def update_state_machine(self, leaders_commit_index):
RaftService.logger.info("In update state machine. My commit_index: %d Leader commit_index: %d My stable log length: %d" % (RaftService.commit_index, leaders_commit_index, len(RaftService.stable_log)))
# Check if stable_log exists till leaders commit_index
# This case exists for LEADERS and FOLLOWERS with updated logs(FOLLOWERS who haven't failed)
if leaders_commit_index <= (len(RaftService.stable_log) - 1):
new_blogs = list()
for i in range(len(RaftService.blog),leaders_commit_index+1):
current_entry = RaftService.stable_log[i]
value = current_entry[2]
if not isinstance(value, basestring):
value = "CONFIG_CHANGE"
new_blogs.append(value)
RaftService.logger.info("Appending %s", new_blogs)
RaftService.blog = RaftService.blog + new_blogs
# Persist blog
RaftService.node_dao.persist_blog(RaftService.blog)
# Update my commit_index
RaftService.commit_index = len(RaftService.blog) - 1
# This case is True for FOLLOWERS who failed and are just up
elif (leaders_commit_index > (len(RaftService.stable_log) - 1) and not RaftService.am_i_getting_updated):
RaftService.am_i_getting_updated = True
self.fix_my_log()
RaftService.am_i_getting_updated = False
else:
# RaftService.logger.warning("Ignoring update...")
pass
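# Hedged example of the commit path above (assumed values, not from the source): if the
# leader reports commit_index = 7 while len(stable_log) = 10 and len(blog) = 5, entries
# stable_log[5] .. stable_log[7] are appended to the blog, the blog is persisted, and the
# local commit_index becomes len(blog) - 1 = 7.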
def fix_my_log(self):
missing_logs = list()
# Get LEADER connection parameters
try:
for peer in RaftService.peers:
if peer[0] == RaftService.leader_id:
leader = peer
break
except Exception as details:
print details
# Connect to LEADER and get missing updated logs
leaders_commit_index = INVALID_COMMIT_INDEX
try:
RaftService.logger.info("Trying to connect to leader")
connection = rpyc.connect(leader[1], leader[2], config={"allow_public_attrs": True})
missing_logs, leaders_commit_index = connection.root.fix_log_RPC(follower_commit_index=RaftService.commit_index)
RaftService.logger.info("Missing logs sent from LEADER: %s Leader Commit index: %d" % (missing_logs, leaders_commit_index))
except Exception as details:
print details
RaftService.logger.warning("fix_my_log: Unable to connect to LEADER %d" % leader[0])
# Check if problem obtaining missing log (Return code: INVALID_COMMIT_INDEX)
if leaders_commit_index != INVALID_COMMIT_INDEX:
# Update log and persist
try:
RaftService.stable_log = RaftService.stable_log[:(RaftService.commit_index+1)]
for log in missing_logs:
RaftService.stable_log.append(log)
except Exception as details:
print details
RaftService.logger.info("New logs: %s" %RaftService.stable_log)
RaftService.node_dao.persist_log(RaftService.stable_log)
# Update blog and persist
if leaders_commit_index <= (len(RaftService.stable_log) - 1):
new_blogs = list()
for i in range(len(RaftService.blog),leaders_commit_index+1):
current_entry = RaftService.stable_log[i]
value = current_entry[2]
if not isinstance(value, basestring):
value = "CONFIG_CHANGE"
new_blogs.append(value)
RaftService.logger.info("Appending %s", new_blogs)
RaftService.blog = RaftService.blog + new_blogs
# Persist blog
RaftService.node_dao.persist_blog(RaftService.blog)
# Update my commit_index
RaftService.commit_index = len(RaftService.blog) - 1
def exposed_fix_log_RPC(self, follower_commit_index):
missing_log = list()
RaftService.logger.info("In fix_log_RPC: My commit index: %d, Follower commit_index: %d" % (RaftService.commit_index, follower_commit_index))
# Make sure follower commit index is indeed less than my commit index
if follower_commit_index <= RaftService.commit_index:
# Send (missing log from his commit index) and my current commit index
missing_log = RaftService.stable_log[(follower_commit_index + 1):]
RaftService.logger.info("Sending missing log: %s to follower" % missing_log)
return (missing_log, RaftService.commit_index)
else:
RaftService.logger.warning("exposed_fix_log_RPC: Something's wrong..")
return (missing_log, INVALID_COMMIT_INDEX)
@staticmethod
def get_last_log_index_and_term():
entry = (0, 0, 0, False)  # default (index, term, value, commit_status) for an empty log
# If stable_log is not empty
if RaftService.stable_log:
entry = RaftService.stable_log[-1]
return entry[0], entry[1]
def print_stable_log(self):
for entry in RaftService.stable_log:
RaftService.logger.info("%s %s %s" % (entry[0], entry[1], entry[2]))
if __name__ == "__main__":
RaftService.logger.info(
"Starting Server %d with Peers %s Term: %d, Voted_for: %d, Stable log: %s, Blog: %s, Commit Index: %d" % (
RaftService.server_id, RaftService.peers, RaftService.term, RaftService.voted_for, RaftService.stable_log,
RaftService.blog, RaftService.commit_index))
RaftService.start_election_timer()
my_port = RaftService.id_ip_port[2]
t = ThreadedServer(RaftService, port=my_port, protocol_config={"allow_public_attrs": True})
t.start()
|
__main__.py
|
# -*- coding: utf-8 -*-
# 2021/2/1
# create by: snower
import sys
import os
import multiprocessing
def show_help_message(filename):
sys.argv[0] = filename
with open(filename) as fp:
exec(fp.read())
print()
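# Hedged note on the design (my reading, not stated in the source): each helper script is
# exec'd in its own multiprocessing.Process because a helper's "-h" handling typically
# calls sys.exit() after printing its usage text; isolating it in a child process keeps the
# loop in the main block alive so every helper's help text can be printed in turn.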
if __name__ == "__main__":
if "-h" not in sys.argv:
exit(0)
filepath = os.path.dirname(__file__)
filenames = []
for filename in os.listdir(filepath):
if filename[:2] == "__" or filename[-3:] != ".py":
continue
filenames.append(filename)
print('usage: python -m sevent.helpers.[HELPER_NAME] [ARGS]\r\n')
print('simple sevent helpers\r\n')
print("available helpers:\r\n\r\n" + '\r\n'.join(["sevent.helpers." + filename[:-3] for filename in filenames]))
print('\r\n\r\n' + '*' * 64 + '\r\n')
for filename in filenames:
p = multiprocessing.Process(target=show_help_message, args=(filepath + os.path.sep + filename,))
p.start()
p.join()
print('\r\n\r\n' + '*' * 64 + '\r\n')
|
img_preprocess.py
|
import threading, queue, time
from queue import Queue
from threading import Thread, currentThread
from concurrent.futures import ThreadPoolExecutor
import os
from CalibrateTransfer.img_operation import ScreenSHot_batch
from CalibrateTransfer.data_preprocess import write_data_to_json_file,read_data_from_json_file,make_dir,read_subdata,read_stack_data
from CalibrateTransfer.cv_transfer import transform_2d_to_3d,object_To_pixel,updata_img_point
from CalibrateTransfer.img_operation import GenerateRect
from FairMot.lib.tracker.multitracker import JDETracker, create_JDETracker_model
import numpy as np
import torch.utils.data as data
import torch
import torch.multiprocessing as mp
from utils.sort_by_point import sort_by_point
import logging
from utils.log import Log
from utils.timer import Timer
from utils.dir_related_operation import makedir_v1
import cv2
class SubImgDetect(data.Dataset): # for sub img detection
def __init__(self,video, video_time, rect, Output_size, InQueue , img_size=(1088, 608), ):
self.width, self.height = img_size[0], img_size[1] # size of the feature map fed to the network
[self.vw, self.vh] = Output_size # size of the input image
[self.w, self.h] = Output_size # size of the visualised image
self.rect = rect # target region [x_l, y_l, x_r, y_r]
self.count = 0
self.InQueue = InQueue
# self.vn = 2 * multiple * self.frame_rate + 1
print('Length of the video: {:d} frames'.format(self.vn))
def __iter__(self):
self.count = -1
return self
def __next__(self):
# Read image
res, img0 = self.InQueue.get()
assert res is not False, 'Failed to load frame {:d}'.format(self.count)
# BGR (OpenCV) -> RGB, HWC -> CHW, then normalise to [0, 1]
img = img0[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
self.count += 1
if self.count == len(self): # stop iterating once the input queue is exhausted and the finish flag is set
raise StopIteration
return self.count, img, img0
def __len__(self):
return self.vn # number of files
class Calibrate_transfer():
def __init__(self,opt, detector_opt, Tracker_output_queue,C_T_output_queue,S_Coordinate_transfer,S_Pose_Estimate, vis=False, save_results=False ,queueSize=1024):
self.logger = Log(__name__, 'Calibrate_transfer' ).getlog()
self.opt = opt
self.dir_name = opt.dir_name
self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
# logger.info('Target directory is {}'.format(self.root_path))
self.file_name = opt.file_name
# The video has to be loaded twice anyway, so reading it separately here is fine
self.Videoparameters, \
self.setting_parameter, \
self.action_datas, \
self.channel_list, \
self.parameter = read_data_from_json_file(self.root_path, self.file_name, self.opt)
self.datalen = len(self.action_datas)
self.detector_opt = detector_opt # options used to configure the tracker
self.logger.info('Creating model...')
self.detector_model = create_JDETracker_model(self.detector_opt)
self.detector = JDETracker(self.detector_opt, self.detector_model) # the JDE tracker is used here as a detector
self.input_Q = Tracker_output_queue # overall input of the tracking data
self.PreProcess_Q = Queue(maxsize=queueSize) # pre-processing of the coordinate-transferred crops before detection
self.tracking_Q = Queue(maxsize=queueSize)
self.detecions_Q = Queue(maxsize=queueSize)
self.output_Q = C_T_output_queue
self.vis = vis
if self.vis == True:
self.vis_path = os.path.join(self.root_path, 'vis')
os.makedirs(self.vis_path, exist_ok=True)
self.S_Coordinate_transfer = S_Coordinate_transfer
self.S_Pose_Estimate = S_Pose_Estimate
self.save_results = save_results
if self.save_results == True:
self.intermediate_results_dir = os.path.join(self.root_path, 'intermediate_results','Calibrate_transfer')
os.makedirs(self.intermediate_results_dir, exist_ok=True)
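# Hedged summary of the pipeline wired up above (my reading of the queues, not stated in
# the source): update() consumes input_Q and feeds PreProcess_Q / tracking_Q, detect()
# consumes PreProcess_Q and feeds detecions_Q, and postProcess() merges detecions_Q with
# tracking_Q before writing the final per-action results to output_Q.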
def Read_From_Cache(self):
'''
Load previously computed results from the cache files.
'''
from utils.index_operation import get_index
self.logger.debug('The pid of Calibrate_transfer.Read_From_Cache() : {}'.format(os.getpid()))
self.logger.debug('The thread of Calibrate_transfer.Read_From_Cache() : {}'.format(currentThread()))
cache_index = get_index(self.intermediate_results_dir)
# Only the useful range needs to be read.
action_index = self.S_Pose_Estimate
for action_index in range(self.S_Pose_Estimate,self.S_Coordinate_transfer):
if action_index not in cache_index:
# Missing from the cache means the action itself was False
self.output_Q.put((False,(action_index, [], [],[],[])))
else:
# Load the cached results for this action from the directory.
_, sub_img_tracking,ReID_features_tracking,sub_imgs_detection,ReID_features_detection = self.load_intermediate_resutls(action_index)
self.output_Q.put((True, (action_index, sub_img_tracking ,ReID_features_tracking,sub_imgs_detection,ReID_features_detection)))
self.logger.log(22, ' Calibrate_transfer loads action {} from Cache file '.format(action_index))
def update_(self):
self.t_update = Thread(target=self.update, args=())
self.t_update.daemon = True
self.t_update.start()
def update(self):
'''
Transfer all crops from one camera view into the other views.
'''
self.logger.debug('The pid of Calibrate_transfer.update() : {}'.format(os.getpid()))
self.logger.debug('The thread of Calibrate_transfer.update() : {}'.format(currentThread()))
update_timer = Timer()
sub_img_generate_timer = Timer()
for action_index in range(self.S_Coordinate_transfer,self.datalen):
update_timer.tic() # start timing
self.logger.debug('update() ======================================== action {}'.format(action_index))
Flag, input_index ,tracking_results = self.input_Q.get()
if input_index != action_index:
self.logger.log(31,'---——————————————————————————————————index does match')
raise Exception('Calibrate_transfer.update action_index_update {} != input_index {} '.format(action_index, input_index))
if Flag == False:
# If Flag is False, drop this action entirely
self.tracking_Q.put((False, (action_index, [], [])))
self.PreProcess_Q.put((False, (action_index, [])))
continue
frames_time, sub_imgs, ReID_feature_list, img_points = tracking_results
# Split into tracking results and detection results obtained by coordinate-transferring every tracked frame
# Store the tracking results in the queue first.
self.tracking_Q.put((True,(action_index, sub_imgs, ReID_feature_list)))
channel,action_time,img_point,video_parameter = read_subdata(self.action_datas[action_index], self.Videoparameters)
calibrateParameter = video_parameter['CalibrateParameter']
# Convert the pixel coordinates of the tracking results into world coordinates
# The first item of the sequence is the reference target used for calibration, not for the later coordinate transfers,
# so start from the second item.
world_points = []
start_time = frames_time[1] # start time of the tracking sequence, relative to the kick-off time
for p_index in range(1, len(img_points)):
img_point = img_points[p_index]
# The input is a continuous trajectory; because of detection misses some frames may have no img_points, so the length is 0
if len(img_point) == 0:
world_points.append(None)
else:
world_point = transform_2d_to_3d(img_point, calibrateParameter.cameraMatrix, calibrateParameter.distCoeffs,
calibrateParameter.rotation_vector,
calibrateParameter.translation_vector, world_z=0)
world_point = np.reshape(world_point, [1, 3])
world_points.append(world_point)
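# Hedged note: transform_2d_to_3d is assumed to back-project each pixel through the camera
# model (cameraMatrix, distCoeffs, rotation_vector, translation_vector) and intersect the
# viewing ray with the ground plane, which is why world_z=0 is passed above.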
# Transfer the world coordinates into the other views, then crop and run detection
# print('len(world_points) : ', len(world_points))
sub_img_generate_timer.tic()
self.sub_img_generate_multi_thread(channel,action_index,world_points,start_time)
# self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
self.logger.log(22,'Calibrate_transfer.update() action {} consums {}s'.format(action_index,update_timer.toc()))
def sub_img_generate(self,video_parameter, setting_parameter, world_points,start_time):
'''
Generate pixel coordinates in the other views from the world coordinates.
'''
results = []
video = video_parameter['video']
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Seek to the start of the tracking sequence, then read frame by frame
time = start_time + video_parameter['delta_t'] # action time need to add the delta time to calibrate the time between channels .
video.set(cv2.CAP_PROP_POS_MSEC, round(1000 * time))
for index in range(len(world_points)):
_, img = video.read()
world_point = world_points[index]
if type(world_point) != np.ndarray:
continue
img_point_of_other = object_To_pixel(world_point, video_parameter['CalibrateParameter'])
img_point_of_other = np.reshape(img_point_of_other, 2)
Message = ScreenSHot_batch(img_point_of_other, img , setting_parameter, width, height)
if Message[0] == True:
# print('sub img of channel {} candidate {} exists'.format(other_channel,img_count_))
image = Message[1]
reference_point = Message[2]
sub_imgs_bias = Message[3]
results.append([index,image,reference_point,sub_imgs_bias])
else:
continue
return results
def sub_img_generate_multi_thread(self,channel,action_index, world_points,start_time):
'''
Generate pixel coordinates in the other views from the world coordinates, one worker thread per view.
'''
results_all = []
executor = ThreadPoolExecutor(max_workers=len(self.channel_list)-1)
task_list = []
for other_channel in self.channel_list:
# Same view as the source, no need to generate crops for it
if other_channel == channel:
continue
video_parameter = self.Videoparameters[other_channel]
setting_parameter = self.setting_parameter
task = executor.submit(self.sub_img_generate, video_parameter,setting_parameter,world_points,start_time)
task_list.append(task)
for task in task_list:
while not task.done():
time.sleep(1)
results_all += task.result()
if len(results_all) > 0 :
self.PreProcess_Q.put((True,(action_index,results_all)))
else:
self.PreProcess_Q.put((False,(action_index,results_all)))
def detect_(self):
self.t_detect = Thread(target=self.detect, args=())
self.t_detect.daemon = True
self.t_detect.start()
def detect(self):
'''
Run the detector on every generated crop to find the people in it.
'''
detect_timer = Timer()
self.logger.debug('The pid of Calibrate_transfer.detect() : {}'.format(os.getpid()))
self.logger.debug('The thread of Calibrate_transfer.detect() : {}'.format(currentThread()))
for action_index in range(self.S_Coordinate_transfer,self.datalen):
self.logger.debug('Calibrate_transfer.Detection ------------action {} has been read '.format(action_index))
Flag_PreProcess, (acton_index, Preprocess_results) = self.PreProcess_Q.get()
detect_timer.tic()
results = []
if Flag_PreProcess == False:
self.detecions_Q.put((False,(acton_index,results)))
continue
# TODO: make this loop parallel
for [index,img0,reference_point,sub_img_bias] in Preprocess_results:
# Img preprocess before detection
img = img0[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
# detection using tracker kernel
# dets = [xl, yl, w, h]
[dets,id_feature] = self.detector.update_for_detection(blob, img0)
if dets.shape[0] == 0 :
# No person detected in the crop, continue
continue
results.append([img0,dets,id_feature,reference_point,sub_img_bias])
if len(results) > 0 :
self.detecions_Q.put((True,(acton_index, results)))
else:
self.detecions_Q.put((False,(acton_index,results)))
self.logger.log(22,'Calibrate_transfer.detect() action {} consums {}s'.format(action_index,detect_timer.toc()))
def postProcess_(self):
self.t_postProcess = Thread(target=self.postProcess, args=())
self.t_postProcess.daemon = True
self.t_postProcess.start()
def postProcess(self):
'''
Post-process the detection results.
'''
postProcess_timer = Timer()
self.logger.debug('The pid of Calibrate_transfer.postProcess() : {}'.format(os.getpid()))
self.logger.debug('The thread of Calibrate_transfer.postProcess() : {}'.format(currentThread()))
for action_index in range(self.S_Coordinate_transfer,self.datalen):
self.logger.debug('postProcess ------------action {} has been read '.format(action_index))
Flag_detect, (acton_index_detection, results) = self.detecions_Q.get()
Flag_tracking, (action_index_tracking,sub_imgs_tracking, ReID_features_tracking) = self.tracking_Q.get()
postProcess_timer.tic()
if Flag_detect == False or Flag_tracking == False:
self.output_Q.put((False,(action_index, [], [],[],[] )))
continue
elif acton_index_detection != action_index or action_index_tracking != action_index:
raise Exception('acton_index_detection {} != action_index_tracking {} '.format(acton_index_detection, action_index_tracking))
if self.vis == True:
vis_dir_ = os.path.join(self.vis_path, '{}'.format(action_index), 'Calibrate_transfer')
makedir_v1(vis_dir_)
# Extract every sub_box.
sub_imgs_detection = []
ReID_features_detection = []
# Filter all results and keep only those whose ID matches the target person.
for r_index, [img0,dets,id_feature,reference_point,sub_img_bias] in enumerate(results):
I_h, I_w, _ = img0.shape
new_reference_point, target_id = sort_by_point([acton_index_detection,dets,False],reference_point,input_index='{}_{}'.format(action_index,r_index))
if target_id == None:
# No suitable box was found when filtering by reference_point
if self.vis == True:
vis_img = np.copy(img0)
for cv2_index in range(int(dets.shape[0])):
box = dets[cv2_index].tolist()
x1, y1, w, h = box
c_intbox = tuple(map(int, (max(0, x1), max(0, y1), min(x1 + w, I_w), min(y1 + h, I_h))))
cv2.rectangle(vis_img, (c_intbox[0], c_intbox[1]), (c_intbox[2], c_intbox[3]), (255, 0, 0), thickness=2)
cv2.circle(vis_img, (int(reference_point[0]), int(reference_point[1])), radius=5, color=(0, 0, 255), thickness=-1) # original point in red
cv2.imwrite(os.path.join(vis_dir_, '{}.jpg'.format(r_index)), vis_img)
continue
# print('dets.shape, target_id : ',dets.shape, target_id)
target_bbox = dets[target_id]
# print('target_bbox.shape : ', target_bbox.shape)
target_bbox = target_bbox.tolist()
# print('target_bbox : ', target_bbox)
x1, y1, w, h = target_bbox
# target region
intbox = tuple(map(int, (max(0, x1), max(0, y1), min(x1+w, I_w), min(y1+h, I_h))))
sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]
# ids = np.arryy(result[2])
target_feature = id_feature[target_id]
sub_imgs_detection.append(sub_img)
ReID_features_detection.append(target_feature)
if self.vis == True:
vis_img = np.copy(img0)
for cv2_index in range(int(dets.shape[0])):
box = dets[cv2_index].tolist()
x1, y1, w, h = box
c_intbox = tuple(map(int, (max(0, x1), max(0, y1), min(x1 + w, I_w), min(y1 + h, I_h))))
cv2.rectangle(vis_img, (c_intbox[0], c_intbox[1]), (c_intbox[2], c_intbox[3]), (255, 0, 0), thickness=2)
cv2.circle(vis_img, (int(reference_point[0]), int(reference_point[1])), radius=5, color=(0, 0, 255), thickness=-1) # original point in red
cv2.circle(vis_img, (int(new_reference_point[0]), int(new_reference_point[1])), radius=5, color=(0, 255, 255), thickness=-1)
cv2.rectangle(vis_img, (intbox[0], intbox[1]), (intbox[2], intbox[3]), (0, 255, 255), thickness=2)
cv2.imwrite(os.path.join(vis_dir_, '{}.jpg'.format(r_index)), vis_img)
# A ReID module could be added here to filter out low-quality sub_imgs
sub_imgs = sub_imgs_detection + sub_imgs_tracking
ReID_features = ReID_features_detection + ReID_features_tracking
self.output_Q.put((True,(action_index, sub_imgs_tracking, ReID_features_tracking,sub_imgs_detection,ReID_features_detection)))
# Save the intermediate results
if self.save_results==True:
self.save_intermediate_resutls(action_index, sub_imgs, ReID_features,
sub_imgs_detection, sub_imgs_tracking,
ReID_features_detection,ReID_features_tracking)
self.logger.log(22,'Calibrate_transfer.postProcess() action {} consums {}s'.format(action_index,postProcess_timer.toc()))
# self.logger.log(22, '-----------------------------Finished Calibrate_transfer.postProcess() datalen = {}-----------------------------'.format(self.datalen))
def save_intermediate_resutls(self,action_index, sub_imgs, ReID_features,
sub_imgs_detection, sub_imgs_tracking,
ReID_features_detection,ReID_features_tracking):
'''Persist the results of each computation to disk.'''
intermediate_resutls_path = os.path.join(self.intermediate_results_dir,'{}'.format(action_index))
os.makedirs(intermediate_resutls_path,exist_ok=True)
ReID_features = np.array(ReID_features)
np.save(os.path.join(intermediate_resutls_path,'{}_ReID_features.npy'.format(action_index)),ReID_features)
for img_index in range(len(sub_imgs)):
cv2.imwrite(os.path.join(intermediate_resutls_path,'{}.jpg'.format(img_index)),sub_imgs[img_index])
# Save the imgs and features from the tracking part
intermediate_resutls_path_tracking = os.path.join(self.intermediate_results_dir,'{}/tracking'.format(action_index))
os.makedirs(intermediate_resutls_path_tracking, exist_ok=True)
ReID_features_tracking = np.array(ReID_features_tracking)
np.save(
os.path.join(intermediate_resutls_path_tracking, '{}_ReID_features_tracking.npy'.format(action_index)),
ReID_features_tracking)
for img_index_tracking in range(len(sub_imgs_tracking)):
cv2.imwrite(os.path.join(intermediate_resutls_path_tracking, '{}.jpg'.format(img_index_tracking)),
sub_imgs_tracking[img_index_tracking])
# Save the imgs and features from the detection part
intermediate_resutls_path_detection = os.path.join(self.intermediate_results_dir, '{}/detection'.format(action_index))
os.makedirs(intermediate_resutls_path_detection, exist_ok=True)
ReID_features_detection = np.array(ReID_features_detection)
np.save(
os.path.join(intermediate_resutls_path_detection, '{}_ReID_features_detection.npy'.format(action_index)), ReID_features_detection)
for img_index_detection in range(len(sub_imgs_detection)):
cv2.imwrite(os.path.join(intermediate_resutls_path_detection, '{}.jpg'.format(img_index_detection)), sub_imgs_detection[img_index_detection])
def load_intermediate_resutls(self,action_index):
'''Load the intermediate results back from disk.'''
intermediate_resutls_path = os.path.join(self.intermediate_results_dir,'{}'.format(action_index))
ReID_features = np.load(os.path.join(intermediate_resutls_path,'{}_ReID_features.npy'.format(action_index)))
ReID_features = [_ for _ in ReID_features] # convert to the format we need
# Collect the image file names in this directory.
sub_imgs_names = [ img_name for img_name in os.listdir(intermediate_resutls_path) if img_name.split('.')[-1] == 'jpg' ]
# Sort the image names in ascending order
sub_imgs_names = sorted(sub_imgs_names, key=lambda img_index : int(img_index.split('.')[0]))
sub_imgs = []
for img_name in sub_imgs_names:
sub_img = cv2.imread(os.path.join(intermediate_resutls_path,img_name))
sub_imgs.append(sub_img)
# Load the tracking part
intermediate_resutls_path_tracking = os.path.join(intermediate_resutls_path, 'tracking')
ReID_features_tracking = np.load(os.path.join(intermediate_resutls_path_tracking, '{}_ReID_features_tracking.npy'.format(action_index)))
ReID_features_tracking = [_ for _ in ReID_features_tracking] # convert to the format we need
# Collect the image file names in this directory.
sub_imgs_names_tracking = [img_name_tracking for img_name_tracking in
os.listdir(intermediate_resutls_path_tracking) if
img_name_tracking.split('.')[-1] == 'jpg']
# Sort the image names in ascending order
sub_imgs_names_tracking = sorted(sub_imgs_names_tracking, key=lambda img_index: int(img_index.split('.')[0]))
sub_imgs_tracking = []
for img_name_tracking in sub_imgs_names_tracking:
sub_img_tracking = cv2.imread(os.path.join(intermediate_resutls_path_tracking, img_name_tracking))
sub_imgs_tracking.append(sub_img_tracking)
# Load the coordinate-transfer (detection) part
intermediate_resutls_path_detection = os.path.join(intermediate_resutls_path,'detection')
ReID_features_detection = np.load(os.path.join(intermediate_resutls_path_detection, '{}_ReID_features_detection.npy'.format(action_index)))
ReID_features_detection = [_ for _ in ReID_features_detection] # convert to the format we need
# Collect the image file names in this directory.
sub_imgs_names_detection = [img_name_detection for img_name_detection in os.listdir(intermediate_resutls_path_detection) if
img_name_detection.split('.')[-1] == 'jpg']
# Sort the image names in ascending order
sub_imgs_names_detection = sorted(sub_imgs_names_detection, key=lambda img_index: int(img_index.split('.')[0]))
sub_imgs_detection = []
for img_name_detection in sub_imgs_names_detection:
sub_img_detection = cv2.imread(os.path.join(intermediate_resutls_path_detection, img_name_detection))
sub_imgs_detection.append(sub_img_detection)
return action_index,sub_imgs_tracking,ReID_features_tracking,sub_imgs_detection,ReID_features_detection
if __name__ == "__main__":
from opt import opt
from FairMot.lib.opts import opts
from CalibrateTransfer.img_operation import ScreenSHot
detector_opt = opts().init()
queueSize = 1000
Tracker_output_queue = Queue(1000)
dir_name = opt.dir_name
root_path = os.path.join(opt.data_root, '{}'.format(dir_name))
file_name = opt.file_name
Videoparameters, \
setting_parameter, \
action_datas, \
channel_list, \
parameter = read_data_from_json_file(root_path, file_name, opt)
vis_path = os.path.join(root_path, 'vis')
os.makedirs(vis_path, exist_ok=True)
multi = 10
C_T_output_queue = Queue(queueSize)
transfer = Calibrate_transfer(opt, detector_opt, Tracker_output_queue, C_T_output_queue, S_Coordinate_transfer=0, S_Pose_Estimate=0, vis=True, queueSize=1024) # start indices assumed to be 0 so the whole action list is processed
transfer.update_()
transfer.detect_()
transfer.postProcess_()
for index in range(len(action_datas)):
channel,action_time,img_point,video_parameter = read_subdata(action_datas[index],Videoparameters)
Message = ScreenSHot(img_point, action_time=action_time, video_parameter=video_parameter,
setting_parameter=setting_parameter)
if Message[0] == True:
count = 1
# Crop the region around the point clicked by the operator.
img0, reference_point, sub_img_bias = Message[1], Message[2], Message[3]
cv2.circle(img0, (int(reference_point[0]), int(reference_point[1])), radius=5, color=(0, 255, 0),
thickness=-1) # mark the original point (green)
vis_dir_ = os.path.join(vis_path,'{}'.format(index))
os.makedirs(vis_dir_,exist_ok=True)
cv2.imwrite(os.path.join(vis_dir_,'target.jpg'),img0)
# detect bboxes and calculate the scores of each bbox
img_points = [img_point] * multi
Tracker_output_queue.put((True,index,[[action_time for i in range(multi)], [], [], img_points]))
transfer.t_update.join()
transfer.t_detect.join()
transfer.t_postProcess.join()
|
test.py
|
# -*- coding: utf-8 -*-
import io
import os
import sys
import json
import datetime
import decimal
from time import *
import threading
coding = 'utf-8'
def test():
import informixdb
conn = informixdb.connect('odbc_demodb@ol_gbasedbt10','gbasedbt','P@ssw0rd0LD')
conn.autocommit = 0 # WE NEED THIS FOR DDL TX COMMIT
cursor = conn.cursor()
cursor.execute("drop table if exists ifxdbtest;")
stmt_list = ['create table ifxdbtest(']
stmt_list.append('uid integer')
stmt_list.append(',uname varchar(100)')
stmt_list.append(',udate date')
stmt_list.append(',udatetime datetime year to fraction(5)')
stmt_list.append(',ufloat float')
stmt_list.append(',udecimal decimal(12,3)')
stmt_list.append(',utext text')
stmt_list.append(',uclob clob')
stmt_list.append(',ubyte byte')
stmt_list.append(',ublob blob')
stmt_list.append(',primary key (uid)')
stmt_list.append(') put ublob in (')
stmt_list.append('sbdbs')
stmt_list.append(');')
stmt = ''.join(stmt_list)
print(stmt)
cursor.execute(stmt)
stmt_list = ['insert into ifxdbtest(']
stmt_list.append('uid')
stmt_list.append(',uname')
stmt_list.append(',udate')
stmt_list.append(',udatetime')
stmt_list.append(',ufloat')
stmt_list.append(',udecimal')
stmt_list.append(',utext')
stmt_list.append(',uclob')
stmt_list.append(',ubyte')
stmt_list.append(',ublob')
stmt_list.append(')')
stmt_list.append(' values(?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(',?')
stmt_list.append(')')
stmt = ''.join(stmt_list)
begin_time = time()
print(stmt)
params = []
lobbuf_size=int(1024000)
uid = int(666)
params.append(uid)
uname = '卡布达'
params.append(uname)
udate = datetime.date(2021,12,3)
params.append(udate)
udatetime = datetime.datetime.now()
params.append(udatetime)
ufloat = float(514.123)
params.append(ufloat)
udecimal = decimal.Decimal('123123.412')
params.append(udecimal)
with open('/etc/passwd', 'rb') as f:
utext = f.read()
params.append(utext)
uclob = conn.Sblob(1) # DEFINED IN SOURCE FILE
with open('/etc/services', 'rb') as f:
while True:
t = f.read(lobbuf_size);
if(t):
uclob.write(t)
else:
break
uclob.close()
params.append(uclob)
with open('./cat.jpg', 'rb') as f:
ubyte = f.read()
params.append(ubyte)
ublob = conn.Sblob(0) # DEFINED IN SOURCE FILE
with open('./cat.jpg', 'rb') as f:
while True:
t = f.read(lobbuf_size);
if(t):
ublob.write(t)
else:
break
ublob.close()
params.append(ublob)
cursor.prepare(stmt)
data = []
ts = []
ret = cursor.execute(None,params)
#BULK INSERT CAN ONLY WORK FAST WITHOUT ANY LOB/SLOB TYPE
#for i in range(10000):
#th = threading.Thread(target=cursor.execute,args=[None,params])
#ts.append(th)
#ret = cursor.execute(None,params) # INSERT 10000 TIMES ROW BY ROW, ELAPSED 2s
#data.append(params)
end_time = time()
paratime = end_time - begin_time
print('paratime:',paratime)
begin_time = time()
#for t in ts:
# t.start()
# t.join()
#ret = cursor.executemany(None,data) # INSERT 10000 ROWS IN BULK ELAPSED 0.8s
#use cursor.callproc(func,param[1,2,3])
conn.commit()
end_time = time()
exectime = end_time - begin_time
print('exectime:',exectime)
begin_time = time()
print('Rows Affected:' + str(ret))
stmt = "select * from ifxdbtest"
cursor.execute(stmt)
colno = len(cursor.description)
print('Column Number:' + str(colno))
print('')
for r in cursor.description:
print("Name:" + r[0] + "\t", end='')
print("Type:" + r[1] + "\t", end='')
print("Xid:" + str(r[2]) + "\t", end='')
print("Length:" + str(r[3]) + "\t", end='')
print("Nullable:" + str(r[6]))
ret = cursor.fetchall()
# use fetchone or fetchmany(N) as need
print('')
for row in ret:
for idx,col in enumerate(row):
type = cursor.description[idx][1]
if(type == 'text'):
with open('./text_passwd', 'wb') as f:
f.write(col)
elif (type == 'byte'):
with open('./byte_cat.jpg', 'wb') as f:
f.write(col)
#Sblob can also "seek", "tell", "stat", "truncate" as needed
elif(cursor.description[idx][1] == 'fixed udt \'clob\''):
col.open()
with open('./clob_services', 'wb') as f:
while (1):
buf=col.read(lobbuf_size)
if(buf):
f.write(buf)
else:
break
col.close()
elif (cursor.description[idx][1] == 'fixed udt \'blob\''):
col.open()
with open('./blob_cat.jpg', 'wb') as f:
while (1):
buf=col.read(lobbuf_size)
if(buf):
f.write(buf)
else:
break
col.close()
else:
print(col)
print("Row Count:"+str(len(ret)))
conn.close()
sys.exit(0)
if __name__ == '__main__':
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
test()
|
session_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import tensorflow.python.platform
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import config_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaises(RuntimeError):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"):
sess.run(a, feed_dict={'a': [2.0]})
if __name__ == '__main__':
googletest.main()
|
generate-dataset-canny.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 20/05/2018 2:45 PM
# File Name : generate-dataset-canny.py
import numpy as np
import sys
import pickle
from dexnet.grasping.quality import PointGraspMetrics3D
from dexnet.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler
from dexnet.grasping import RobotGripper, GraspableObject3D, GraspQualityConfigFactory, PointGraspSampler
import dexnet
from autolab_core import YamlConfig
from meshpy.obj_file import ObjFile
from meshpy.sdf_file import SdfFile
import os
import multiprocessing
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # use a non-interactive backend so the script can run on a remote machine
def get_file_name(file_dir_):
file_list = []
for root, dirs, files in os.walk(file_dir_):
if root.count('/') == file_dir_.count('/') + 1:
file_list.append(root)
file_list.sort()
return file_list
def do_job(i):
object_name = file_list_all[i][len(home_dir) + 35:]
good_grasp = multiprocessing.Manager().list()
p_set = [multiprocessing.Process(target=worker, args=(i, 100, 20, good_grasp)) for _ in
range(50)] # grasp_amount per friction: 20*40
[p.start() for p in p_set]
[p.join() for p in p_set]
good_grasp = list(good_grasp)
good_grasp_file_name = "./generated_grasps/{}_{}_{}".format(filename_prefix, str(object_name), str(len(good_grasp)))
with open(good_grasp_file_name + '.pickle', 'wb') as f:
pickle.dump(good_grasp, f)
tmp = []
for grasp in good_grasp:
grasp_config = grasp[0].configuration
score_friction = grasp[1]
score_canny = grasp[2]
tmp.append(np.concatenate([grasp_config, [score_friction, score_canny]]))
np.save(good_grasp_file_name + '.npy', np.array(tmp))
print("finished job ", object_name)
def worker(i, sample_nums, grasp_amount, good_grasp):
object_name = file_list_all[i][len(home_dir) + 35:]
    print('Worker started for object: {}'.format(object_name))
yaml_config = YamlConfig(home_dir + "/code/grasp-pointnet/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/code/grasp-pointnet/dex-net/data/grippers")
grasp_sample_method = "antipodal"
if grasp_sample_method == "uniform":
ags = UniformGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gaussian":
ags = GaussianGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "antipodal":
ags = AntipodalGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gpg":
ags = GpgGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "point":
ags = PointGraspSampler(gripper, yaml_config)
else:
raise NameError("Can't support this sampler")
print("Log: do job", i)
if os.path.exists(str(file_list_all[i]) + "/google_512k/nontextured.obj"):
of = ObjFile(str(file_list_all[i]) + "/google_512k/nontextured.obj")
sf = SdfFile(str(file_list_all[i]) + "/google_512k/nontextured.sdf")
else:
print("can't find any obj or sdf file!")
raise NameError("can't find any obj or sdf file!")
mesh = of.read()
sdf = sf.read()
obj = GraspableObject3D(sdf, mesh)
print("Log: opened object", i + 1, object_name)
force_closure_quality_config = {}
canny_quality_config = {}
fc_list_sub1 = np.arange(2.0, 0.75, -0.4)
fc_list_sub2 = np.arange(0.5, 0.36, -0.05)
fc_list = np.concatenate([fc_list_sub1, fc_list_sub2])
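    # Note added for clarity: with the ranges above, fc_list is roughly
    # [2.0, 1.6, 1.2, 0.8, 0.5, 0.45, 0.4] -- a descending ladder of friction
    # coefficients. Each grasp is later labelled with the smallest tested
    # coefficient at which it is still force-closure.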
for value_fc in fc_list:
value_fc = round(value_fc, 2)
yaml_config['metrics']['force_closure']['friction_coef'] = value_fc
yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
force_closure_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['force_closure'])
canny_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['robust_ferrari_canny'])
good_count_perfect = np.zeros(len(fc_list))
count = 0
minimum_grasp_per_fc = grasp_amount
while np.sum(good_count_perfect < minimum_grasp_per_fc) != 0:
grasps = ags.generate_grasps(obj, target_num_grasps=sample_nums, grasp_gen_mult=10,
vis=False, random_approach_angle=True)
count += len(grasps)
for j in grasps:
tmp, is_force_closure = False, False
for ind_, value_fc in enumerate(fc_list):
value_fc = round(value_fc, 2)
tmp = is_force_closure
is_force_closure = PointGraspMetrics3D.grasp_quality(j, obj,
force_closure_quality_config[value_fc], vis=False)
if tmp and not is_force_closure:
if good_count_perfect[ind_ - 1] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[
round(fc_list[ind_ - 1], 2)],
vis=False)
good_grasp.append((j, round(fc_list[ind_ - 1], 2), canny_quality))
good_count_perfect[ind_ - 1] += 1
break
elif is_force_closure and value_fc == fc_list[-1]:
if good_count_perfect[ind_] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[value_fc], vis=False)
good_grasp.append((j, value_fc, canny_quality))
good_count_perfect[ind_] += 1
break
print('Object:{} GoodGrasp:{}'.format(object_name, good_count_perfect))
object_name_len = len(object_name)
object_name_ = str(object_name) + " " * (25 - object_name_len)
if count == 0:
good_grasp_rate = 0
else:
good_grasp_rate = len(good_grasp) / count
print('Gripper:{} Object:{} Rate:{:.4f} {}/{}'.
format(gripper_name, object_name_, good_grasp_rate, len(good_grasp), count))
if __name__ == '__main__':
if len(sys.argv) > 1:
filename_prefix = sys.argv[1]
else:
filename_prefix = "default"
home_dir = os.environ['HOME']
file_dir = home_dir + "/dataset/ycb_meshes_google/objects"
file_list_all = get_file_name(file_dir)
object_numbers = 2
job_list = np.arange(object_numbers)
job_list = list(job_list)
    pool_size = 1  # number of jobs run at the same time
assert (pool_size <= len(job_list))
# Initialize pool
pool = []
for _ in range(pool_size):
job_i = job_list.pop(0)
pool.append(multiprocessing.Process(target=do_job, args=(job_i,)))
[p.start() for p in pool]
# refill
while len(job_list) > 0:
for ind, p in enumerate(pool):
if not p.is_alive():
pool.pop(ind)
job_i = job_list.pop(0)
p = multiprocessing.Process(target=do_job, args=(job_i,))
p.start()
pool.append(p)
break
print('All job done.')
|
cleaner.py
|
# -*- coding: utf-8 -*-
# Copyright 2013-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vincent.garonne@cern.ch>, 2014-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
"""
Judge-Cleaner is a daemon to clean expired replication rules.
"""
import logging
import os
import socket
import threading
import time
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from re import match
from sqlalchemy.exc import DatabaseError
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import formatted_logger, setup_logging
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter
from rucio.core.rule import delete_rule, get_expired_rules
from rucio.db.sqla.util import get_db_time
graceful_stop = threading.Event()
def rule_cleaner(once=False):
"""
Main loop to check for expired replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-cleaners have the correct worker number on the next try
executable = 'judge-cleaner'
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
prefix = 'judge-cleaner[%i/%i] ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
logger = formatted_logger(logging.log, prefix + '%s')
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
prefix = 'judge-cleaner[%i/%i] ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
logger = formatted_logger(logging.log, prefix + '%s')
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_expired_rules(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
limit=200,
blacklisted_rules=[key for key in paused_rules])
logger(logging.DEBUG, 'index query time %f fetch size is %d' % (time.time() - start, len(rules)))
if not rules and not once:
logger(logging.DEBUG, 'did not get any work (paused_rules=%s)' % str(len(paused_rules)))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
rule_expression = rule[1]
logger(logging.INFO, 'Deleting rule %s with expression %s' % (rule_id, rule_expression))
if graceful_stop.is_set():
break
try:
start = time.time()
delete_rule(rule_id=rule_id, nowait=True)
logger(logging.DEBUG, 'deletion of %s took %f' % (rule_id, time.time() - start))
except (DatabaseException, DatabaseError, UnsupportedOperation) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
record_counter('rule.judge.exceptions.LocksDetected')
logger(logging.WARNING, 'Locks detected for %s' % rule_id)
elif match('.*QueuePool.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logger(logging.ERROR, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RuleNotFound:
pass
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logger(logging.CRITICAL, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logger(logging.CRITICAL, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
    Starts up the Judge-Cleaner threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
client_time, db_time = datetime.utcnow(), get_db_time()
max_offset = timedelta(hours=1, seconds=10)
if type(db_time) is datetime:
if db_time - client_time > max_offset or client_time - db_time > max_offset:
logging.critical('Offset between client and db time too big. Stopping Cleaner')
return
executable = 'judge-cleaner'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
rule_cleaner(once)
else:
logging.info('Cleaner starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
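

# Typical wiring for this daemon (a sketch added for illustration; the actual
# Rucio entry-point script handles argument parsing and signal registration):
#
#     import signal
#     signal.signal(signal.SIGTERM, stop)
#     run(once=False, threads=1)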
|
USBCameraStream.py
|
"""
The MIT License (MIT)
Copyright (c) 2015 Adrian Rosebrock, http://www.pyimagesearch.com
"""
# import the necessary packages
from threading import Thread
import cv2
class USBCameraStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture()
self.stream.open(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
# self.error = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def close(self):
"""
Clean up OpenCV Camera
"""
self.stream.release()
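

# Minimal usage sketch (added as an illustration; assumes OpenCV is installed
# and a webcam is available at index 0):
if __name__ == "__main__":
    import time

    stream = USBCameraStream(src=0).start()
    time.sleep(1.0)  # give the capture thread a moment to grab the first frames
    frames_seen = 0
    deadline = time.time() + 3.0
    while time.time() < deadline:
        if stream.read() is not None:
            frames_seen += 1
        time.sleep(0.01)
    stream.stop()
    stream.close()
    print("read {} frames in ~3 seconds".format(frames_seen))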
|
keys.py
|
# Code by Daniel Kukiela (https://twitter.com/daniel_kukiela)
import ctypes
from threading import Thread
from time import time, sleep
from queue import Queue
# main keys class
class Keys(object):
common = None
standalone = False
# instance of worker class
keys_worker = None
keys_process = None
# key constants
direct_keys = 0x0008
virtual_keys = 0x0000
key_press = 0x0000
key_release = 0x0002
# mouse constants
mouse_move = 0x0001
mouse_lb_press = 0x0002
mouse_lb_release = 0x0004
mouse_rb_press = 0x0008
mouse_rb_release = 0x0010
mouse_mb_press = 0x0020
mouse_mb_release = 0x0040
# direct keys
dk = {
"1": 0x02,
"2": 0x03,
"3": 0x04,
"4": 0x05,
"5": 0x06,
"6": 0x07,
"7": 0x08,
"8": 0x09,
"9": 0x0A,
"0": 0x0B,
"NUMPAD1": 0x4F, "NP1": 0x4F,
"NUMPAD2": 0x50, "NP2": 0x50,
"NUMPAD3": 0x51, "NP3": 0x51,
"NUMPAD4": 0x4B, "NP4": 0x4B,
"NUMPAD5": 0x4C, "NP5": 0x4C,
"NUMPAD6": 0x4D, "NP6": 0x4D,
"NUMPAD7": 0x47, "NP7": 0x47,
"NUMPAD8": 0x48, "NP8": 0x48,
"NUMPAD9": 0x49, "NP9": 0x49,
"NUMPAD0": 0x52, "NP0": 0x52,
"DIVIDE": 0xB5, "NPDV": 0xB5,
"MULTIPLY": 0x37, "NPM": 0x37,
"SUBSTRACT": 0x4A, "NPS": 0x4A,
"ADD": 0x4E, "NPA": 0x4E,
"DECIMAL": 0x53, "NPDC": 0x53,
"NUMPADENTER": 0x9C, "NPE": 0x9C,
"A": 0x1E,
"B": 0x30,
"C": 0x2E,
"D": 0x20,
"E": 0x12,
"F": 0x21,
"G": 0x22,
"H": 0x23,
"I": 0x17,
"J": 0x24,
"K": 0x25,
"L": 0x26,
"M": 0x32,
"N": 0x31,
"O": 0x18,
"P": 0x19,
"Q": 0x10,
"R": 0x13,
"S": 0x1F,
"T": 0x14,
"U": 0x16,
"V": 0x2F,
"W": 0x11,
"X": 0x2D,
"Y": 0x15,
"Z": 0x2C,
"F1": 0x3B,
"F2": 0x3C,
"F3": 0x3D,
"F4": 0x3E,
"F5": 0x3F,
"F6": 0x40,
"F7": 0x41,
"F8": 0x42,
"F9": 0x43,
"F10": 0x44,
"F11": 0x57,
"F12": 0x58,
"UP": 0xC8,
"LEFT": 0xCB,
"RIGHT": 0xCD,
"DOWN": 0xD0,
"ESC": 0x01,
"SPACE": 0x39, "SPC": 0x39,
"RETURN": 0x1C, "ENT": 0x1C,
"INSERT": 0xD2, "INS": 0xD2,
"DELETE": 0xD3, "DEL": 0xD3,
"HOME": 0xC7,
"END": 0xCF,
"PRIOR": 0xC9, "PGUP": 0xC9,
"NEXT": 0xD1, "PGDN": 0xD1,
"BACK": 0x0E,
"TAB": 0x0F,
"LCONTROL": 0x1D, "LCTRL": 0x1D,
"RCONTROL": 0x9D, "RCTRL": 0x9D,
"LSHIFT": 0x2A, "LSH": 0x2A,
"RSHIFT": 0x36, "RSH": 0x36,
"LMENU": 0x38, "LALT": 0x38,
"RMENU": 0xB8, "RALT": 0xB8,
"LWIN": 0xDB,
"RWIN": 0xDC,
"APPS": 0xDD,
"CAPITAL": 0x3A, "CAPS": 0x3A,
"NUMLOCK": 0x45, "NUM": 0x45,
"SCROLL": 0x46, "SCR": 0x46,
"MINUS": 0x0C, "MIN": 0x0C,
"LBRACKET": 0x1A, "LBR": 0x1A,
"RBRACKET": 0x1B, "RBR": 0x1B,
"SEMICOLON": 0x27, "SEM": 0x27,
"APOSTROPHE": 0x28, "APO": 0x28,
"GRAVE": 0x29, "GRA": 0x29,
"BACKSLASH": 0x2B, "BSL": 0x2B,
"COMMA": 0x33, "COM": 0x33,
"PERIOD": 0x34, "PER": 0x34,
"SLASH": 0x35, "SLA": 0x35,
}
# virtual keys
vk = {
"1": 0x31,
"2": 0x32,
"3": 0x33,
"4": 0x34,
"5": 0x35,
"6": 0x36,
"7": 0x37,
"8": 0x38,
"9": 0x39,
"0": 0x30,
"NUMPAD1": 0x61, "NP1": 0x61,
"NUMPAD2": 0x62, "NP2": 0x62,
"NUMPAD3": 0x63, "NP3": 0x63,
"NUMPAD4": 0x64, "NP4": 0x64,
"NUMPAD5": 0x65, "NP5": 0x65,
"NUMPAD6": 0x66, "NP6": 0x66,
"NUMPAD7": 0x67, "NP7": 0x67,
"NUMPAD8": 0x68, "NP8": 0x68,
"NUMPAD9": 0x69, "NP9": 0x69,
"NUMPAD0": 0x60, "NP0": 0x60,
"DIVIDE": 0x6F, "NPDV": 0x6F,
"MULTIPLY": 0x6A, "NPM": 0x6A,
"SUBSTRACT": 0x6D, "NPS": 0x6D,
"ADD": 0x6B, "NPA": 0x6B,
"DECIMAL": 0x6E, "NPDC": 0x6E,
"NUMPADENTER": 0x0D, "NPE": 0x0D,
"A": 0x41,
"B": 0x42,
"C": 0x43,
"D": 0x44,
"E": 0x45,
"F": 0x46,
"G": 0x47,
"H": 0x48,
"I": 0x49,
"J": 0x4A,
"K": 0x4B,
"L": 0x4C,
"M": 0x4D,
"N": 0x4E,
"O": 0x4F,
"P": 0x50,
"Q": 0x51,
"R": 0x52,
"S": 0x53,
"T": 0x54,
"U": 0x55,
"V": 0x56,
"W": 0x57,
"X": 0x58,
"Y": 0x59,
"Z": 0x5A,
"F1": 0x70,
"F2": 0x71,
"F3": 0x72,
"F4": 0x73,
"F5": 0x74,
"F6": 0x75,
"F7": 0x76,
"F8": 0x77,
"F9": 0x78,
"F10": 0x79,
"F11": 0x7A,
"F12": 0x7B,
"UP": 0x26,
"LEFT": 0x25,
"RIGHT": 0x27,
"DOWN": 0x28,
"ESC": 0x1B,
"SPACE": 0x20, "SPC": 0x20,
"RETURN": 0x0D, "ENT": 0x0D,
"INSERT": 0x2D, "INS": 0x2D,
"DELETE": 0x2E, "DEL": 0x2E,
"HOME": 0x24,
"END": 0x23,
"PRIOR": 0x21, "PGUP": 0x21,
"NEXT": 0x22, "PGDN": 0x22,
"BACK": 0x08,
"TAB": 0x09,
"LCONTROL": 0xA2, "LCTRL": 0xA2,
"RCONTROL": 0xA3, "RCTRL": 0xA3,
"LSHIFT": 0xA0, "LSH": 0xA0,
"RSHIFT": 0xA1, "RSH": 0xA1,
"LMENU": 0xA4, "LALT": 0xA4,
"RMENU": 0xA5, "RALT": 0xA5,
"LWIN": 0x5B,
"RWIN": 0x5C,
"APPS": 0x5D,
"CAPITAL": 0x14, "CAPS": 0x14,
"NUMLOCK": 0x90, "NUM": 0x90,
"SCROLL": 0x91, "SCR": 0x91,
"MINUS": 0xBD, "MIN": 0xBD,
"LBRACKET": 0xDB, "LBR": 0xDB,
"RBRACKET": 0xDD, "RBR": 0xDD,
"SEMICOLON": 0xBA, "SEM": 0xBA,
"APOSTROPHE": 0xDE, "APO": 0xDE,
"GRAVE": 0xC0, "GRA": 0xC0,
"BACKSLASH": 0xDC, "BSL": 0xDC,
"COMMA": 0xBC, "COM": 0xBC,
"PERIOD": 0xBE, "PER": 0xBE,
"SLASH": 0xBF, "SLA": 0xBF,
}
# setup object
def __init__(self, common = None):
self.keys_worker = KeysWorker(self)
# Thread(target=self.keys_worker.processQueue).start()
self.common = common
if common is None:
self.standalone = True
    # parses a key string and adds the keys to the queue
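    # Accepted tokens (comma separated; note added for clarity): names from
    # the dk/vk tables ("A", "LCTRL", ...), hex codes such as "0x1C", pauses
    # written as "-40" (milliseconds), the switches "DK"/"VK" to select direct
    # or virtual keys, and "_down"/"_up" suffixes to send only one half of a
    # key stroke, e.g. "a_down,-4,a_up".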
def parseKeyString(self, string):
# print keys
if not self.standalone:
self.common.info("Processing keys: %s" % string)
key_queue = []
errors = []
# defaults to direct keys
key_type = self.direct_keys
# split by comma
keys = string.upper().split(",")
# translate
for key in keys:
# up, down or stroke?
up = True
down = True
direction = key.split("_")
subkey = direction[0]
if len(direction) >= 2:
if direction[1] == 'UP':
down = False
else:
up = False
# switch to virtual keys
if subkey == "VK":
key_type = self.virtual_keys
# switch to direct keys
elif subkey == "DK":
key_type = self.direct_keys
# key code
elif subkey.startswith("0x"):
subkey = int(subkey, 16)
if subkey > 0 and subkey < 256:
key_queue.append({
"key": int(subkey),
"okey": subkey,
"time": 0,
"up": up,
"down": down,
"type": key_type,
})
else:
errors.append(key)
# pause
elif subkey.startswith("-"):
time = float(subkey.replace("-", ""))/1000
if time > 0 and time <= 10:
key_queue.append({
"key": None,
"okey": "",
"time": time,
"up": False,
"down": False,
"type": None,
})
else:
errors.append(key)
# direct key
elif key_type == self.direct_keys and subkey in self.dk:
key_queue.append({
"key": self.dk[subkey],
"okey": subkey,
"time": 0,
"up": up,
"down": down,
"type": key_type,
})
# virtual key
elif key_type == self.virtual_keys and subkey in self.vk:
key_queue.append({
"key": self.vk[subkey],
"okey": subkey,
"time": 0,
"up": up,
"down": down,
"type": key_type,
})
# no match?
else:
errors.append(key)
# if there are errors, do not process keys
if len(errors):
return errors
# create new thread if there is no active one
        if self.keys_process is None or not self.keys_process.is_alive():
self.keys_process = Thread(target=self.keys_worker.processQueue)
self.keys_process.start()
# add keys to queue
for i in key_queue:
self.keys_worker.key_queue.put(i)
self.keys_worker.key_queue.put(None)
return True
# direct key press
def directKey(self, key, direction = None, type = None):
if type is None:
type = self.direct_keys
if direction is None:
direction = self.key_press
if key.startswith("0x"):
key = int(key, 16)
else:
key = key.upper()
lookup_table = self.dk if type == self.direct_keys else self.vk
key = lookup_table[key] if key in lookup_table else 0x0000
self.keys_worker.sendKey(key, direction | type)
# direct mouse move or button press
def directMouse(self, dx = 0, dy = 0, buttons = 0):
self.keys_worker.sendMouse(dx, dy, buttons)
# threaded sending keys class
class KeysWorker():
# keys object
keys = None
# queue of keys
key_queue = Queue()
# init
def __init__(self, keys):
self.keys = keys
# main function, process key's queue in loop
def processQueue(self):
# endless loop
while True:
# get one key
key = self.key_queue.get()
# terminate process if queue is empty
if key is None:
self.key_queue.task_done()
if self.key_queue.empty():
return
continue
# print key
elif not self.keys.standalone:
self.keys.common.info("Key: \033[1;35m%s/%s\033[0;37m, duration: \033[1;35m%f\033[0;37m, direction: \033[1;35m%s\033[0;37m, type: \033[1;35m%s" % (
key["okey"] if key["okey"] else "None",
key["key"], key["time"],
"UP" if key["up"] and not key["down"] else "DOWN" if not key["up"] and key["down"] else "BOTH" if key["up"] and key["down"] else "NONE",
"None" if key["type"] is None else "DK" if key["type"] == self.keys.direct_keys else "VK"), "\033[0;35mKEY: \033[0;37m"
)
# if it's a key
if key["key"]:
# press
if key["down"]:
self.sendKey(key["key"], self.keys.key_press | key["type"])
# wait
sleep(key["time"])
# and release
if key["up"]:
self.sendKey(key["key"], self.keys.key_release | key["type"])
# not an actual key, just pause
else:
sleep(key["time"])
# mark as done (decrement internal queue counter)
self.key_queue.task_done()
# send key
def sendKey(self, key, type):
self.SendInput(self.Keyboard(key, type))
# send mouse
def sendMouse(self, dx, dy, buttons):
if dx != 0 or dy != 0:
buttons |= self.keys.mouse_move
self.SendInput(self.Mouse(buttons, dx, dy))
# send input
def SendInput(self, *inputs):
nInputs = len(inputs)
LPINPUT = INPUT * nInputs
pInputs = LPINPUT(*inputs)
cbSize = ctypes.c_int(ctypes.sizeof(INPUT))
return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)
# get input object
def Input(self, structure):
if isinstance(structure, MOUSEINPUT):
return INPUT(0, _INPUTunion(mi=structure))
if isinstance(structure, KEYBDINPUT):
return INPUT(1, _INPUTunion(ki=structure))
if isinstance(structure, HARDWAREINPUT):
return INPUT(2, _INPUTunion(hi=structure))
raise TypeError('Cannot create INPUT structure!')
# mouse input
def MouseInput(self, flags, x, y, data):
return MOUSEINPUT(x, y, data, flags, 0, None)
# keyboard input
def KeybdInput(self, code, flags):
return KEYBDINPUT(code, code, flags, 0, None)
# hardware input
def HardwareInput(self, message, parameter):
return HARDWAREINPUT(message & 0xFFFFFFFF,
parameter & 0xFFFF,
parameter >> 16 & 0xFFFF)
# mouse object
def Mouse(self, flags, x=0, y=0, data=0):
return self.Input(self.MouseInput(flags, x, y, data))
# keyboard object
def Keyboard(self, code, flags=0):
return self.Input(self.KeybdInput(code, flags))
# hardware object
def Hardware(self, message, parameter=0):
return self.Input(self.HardwareInput(message, parameter))
# types
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort
class MOUSEINPUT(ctypes.Structure):
_fields_ = (('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
_fields_ = (('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD))
class _INPUTunion(ctypes.Union):
_fields_ = (('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
_fields_ = (('type', DWORD),
('union', _INPUTunion))
#example:
if __name__ == '__main__':
sleep(3)
keys = Keys()
# mouse movement
for i in range(100):
keys.directMouse(-1*i, 0)
sleep(0.004)
# mouse keys
keys.directMouse(buttons=keys.mouse_rb_press)
sleep(0.5)
keys.directMouse(buttons=keys.mouse_lb_press)
sleep(2)
keys.directMouse(buttons=keys.mouse_lb_release)
sleep(0.5)
keys.directMouse(buttons=keys.mouse_rb_release)
# or
keys.directMouse(buttons=keys.mouse_lb_press | keys.mouse_rb_press)
sleep(2)
keys.directMouse(buttons=keys.mouse_lb_release | keys.mouse_rb_release)
# keyboard (direct keys)
keys.directKey("a")
sleep(0.04)
keys.directKey("a", keys.key_release)
# keyboard (virtual keys)
keys.directKey("a", type=keys.virtual_keys)
sleep(0.04)
keys.directKey("a", keys.key_release, keys.virtual_keys)
    # queue of keys (direct keys, threaded, only for keyboard input)
    keys.parseKeyString("a_down,-4,a_up,0x01") # -4 - pause for 4 ms, 0x01 - DirectInput code of Esc
    # queue of keys (virtual keys, threaded, only for keyboard input)
keys.parseKeyString("vk,a_down,-4,a_up") # -4 - pause for 4 ms
|
sqlite3_isolation_levels.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Illustrate the effect of isolation levels.
"""
#end_pymotw_header
import logging
import sqlite3
import sys
import threading
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s (%(threadName)-10s) %(message)s',
)
db_filename = 'todo.db'
isolation_level = sys.argv[1]
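# Usage note added for clarity: the first command-line argument selects the
# sqlite3 isolation level, e.g. "DEFERRED", "IMMEDIATE" or "EXCLUSIVE"; the
# script assumes an existing todo.db with a populated "task" table.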
def writer():
    my_name = threading.current_thread().name
with sqlite3.connect(db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
cursor.execute('update task set priority = priority + 1')
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('PAUSING')
time.sleep(1)
conn.commit()
logging.debug('CHANGES COMMITTED')
return
def reader():
    my_name = threading.current_thread().name
with sqlite3.connect(db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('wait over')
cursor.execute('select * from task')
logging.debug('SELECT EXECUTED')
results = cursor.fetchall()
logging.debug('results fetched')
return
if __name__ == '__main__':
ready = threading.Event()
threads = [
threading.Thread(name='Reader 1', target=reader),
threading.Thread(name='Reader 2', target=reader),
threading.Thread(name='Writer 1', target=writer),
threading.Thread(name='Writer 2', target=writer),
]
[ t.start() for t in threads ]
time.sleep(1)
logging.debug('setting ready')
ready.set()
[ t.join() for t in threads ]
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "kujenga", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run kujenga, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join(5)
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
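

# Minimal usage sketch (added as an illustration; requires the optional
# miniupnpc package and a UPnP-capable gateway on the local network; the
# port number here is arbitrary):
if __name__ == "__main__":
    import time

    upnp = UPnP()
    upnp.remap(8444)    # ask the worker thread to forward TCP port 8444
    time.sleep(60)      # keep the mapping alive for a minute
    upnp.release(8444)  # ask the worker thread to remove the mapping
    upnp.shutdown()     # stop the worker thread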
|
grpc_comm_manager.py
|
import logging
import os
import threading
from concurrent import futures
from typing import List
import grpc
from ..gRPC import grpc_comm_manager_pb2_grpc, grpc_comm_manager_pb2
lock = threading.Lock()
from ...communication.base_com_manager import BaseCommunicationManager
from ...communication.message import Message
from ...communication.observer import Observer
from ...communication.gRPC.grpc_server import GRPCCOMMServicer
import csv
class GRPCCommManager(BaseCommunicationManager):
def __init__(self, host, port, ip_config_path, topic="fedml", client_id=0, client_num=0):
# host is the ip address of server
self.host = host
self.port = str(port)
self._topic = topic
self.client_id = client_id
self.client_num = client_num
self._observers: List[Observer] = []
if client_id == 0:
self.node_type = "server"
else:
self.node_type = "client"
self.opts = [
("grpc.max_send_message_length", 1000 * 1024 * 1024),
("grpc.max_receive_message_length", 1000 * 1024 * 1024),
("grpc.enable_http_proxy", 0),
]
self.grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=client_num), options=self.opts)
self.grpc_servicer = GRPCCOMMServicer(host, port, client_num, client_id)
grpc_comm_manager_pb2_grpc.add_gRPCCommManagerServicer_to_server(self.grpc_servicer, self.grpc_server)
logging.info(os.getcwd())
self.ip_config = self._build_ip_table(ip_config_path)
# starts a grpc_server on local machine using ip address "0.0.0.0"
self.grpc_server.add_insecure_port("{}:{}".format("0.0.0.0", port))
self.grpc_server.start()
self.is_running = True
print("server started. Listening on port " + str(port))
def send_message(self, msg: Message):
payload = msg.to_json()
receiver_id = msg.get_receiver_id()
PORT_BASE = 50000
# lookup ip of receiver from self.ip_config table
receiver_ip = self.ip_config[str(receiver_id)]
channel_url = "{}:{}".format(receiver_ip, str(PORT_BASE + receiver_id))
channel = grpc.insecure_channel(channel_url, options=self.opts)
stub = grpc_comm_manager_pb2_grpc.gRPCCommManagerStub(channel)
request = grpc_comm_manager_pb2.CommRequest()
logging.info("sending message to {}".format(channel_url))
request.client_id = self.client_id
request.message = payload
stub.sendMessage(request)
logging.debug("sent successfully")
channel.close()
def add_observer(self, observer: Observer):
self._observers.append(observer)
def remove_observer(self, observer: Observer):
self._observers.remove(observer)
def handle_receive_message(self):
thread = threading.Thread(target=self.message_handling_subroutine)
thread.start()
def message_handling_subroutine(self):
while self.is_running:
if self.grpc_servicer.message_q.qsize() > 0:
lock.acquire()
msg_params_string = self.grpc_servicer.message_q.get()
msg_params = Message()
msg_params.init_from_json_string(msg_params_string)
msg_type = msg_params.get_type()
for observer in self._observers:
observer.receive_message(msg_type, msg_params)
lock.release()
return
def stop_receive_message(self):
self.grpc_server.stop(None)
self.is_running = False
def notify(self, message: Message):
msg_type = message.get_type()
for observer in self._observers:
observer.receive_message(msg_type, message)
def _build_ip_table(self, path):
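        # Expected file layout (an assumption inferred from how the table is
        # used in send_message): a header row followed by "receiver_id,ip"
        # rows, for example:
        #   receiver_id,ip
        #   1,192.168.1.21
        #   2,192.168.1.22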
ip_config = dict()
with open(path, newline="") as csv_file:
csv_reader = csv.reader(csv_file)
# skip header line
next(csv_reader)
for row in csv_reader:
receiver_id, receiver_ip = row
ip_config[receiver_id] = receiver_ip
return ip_config
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any further.
        #   Always return leaves.
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
            options: a dict with configuration options.
            explicit: a list of fixers to run even if they are marked explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring. If
# and only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except AssertionError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
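# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Assuming this module mirrors lib2to3.refactor, the multiprocess tool is driven
# the same way as the single-process RefactoringTool, with ``num_processes``
# fanning work out over a JoinableQueue. The fixer name and target path below
# are assumptions for illustration only.
if __name__ == "__main__":
    demo_tool = MultiprocessRefactoringTool(["lib2to3.fixes.fix_print"])
    # Each worker pulls (args, kwargs) tuples for refactor_file() off the queue.
    demo_tool.refactor(["example_package/"], write=False, num_processes=4)
    demo_tool.summarize()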
|
sql_isolation_testcase.py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
import pygresql.pg
from tinctest.lib import Gpdiff
from tinctest.lib.system import TINCSystem
import os
import subprocess
import re
import multiprocessing
import time
import sys
import socket
import tinctest
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
self.command_pattern = re.compile(r"^(\d+)([&\\<\\>Uq]?)\:(.*)")
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, self.out_file.name, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.2)
if self.pipe.poll(0):
raise Exception("Forked command is not blocking")
self.has_open = True
def join(self):
print >>self.out_file, " <... completed>"
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.strip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, output_file, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = pygresql.pg.connect(host=hostname,
port=port,
opt="-c gp_session_role=utility",
dbname=self.dbname)
else:
self.con = pygresql.pg.connect(dbname=self.dbname)
self.filename = "%s.%s" % (output_file, os.getpid())
def get_utility_mode_port(self, name):
"""
Gets the port number/hostname combination of the
dbid with the id = name
"""
con = pygresql.pg.connect(port = int(os.environ.get("PGPORT", 5432)))
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE dbid = %s" % name).getresult()
if len(r) == 0:
raise Exception("Invalid dbid %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
            This is pretty dirty, but apparently the only way
            to get the pretty output of the query result.
            For some Python-internal reason, print(r) calls the
            correct function, while neither str(r) nor repr(r)
            outputs anything useful.
"""
with open(self.filename, "w") as f:
print >>f, r,
f.flush()
with open(self.filename, "r") as f:
ppr = f.read()
return ppr.strip() + "\n"
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, self.printout_result(r))
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when the command None is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
if os.path.exists(self.filename):
os.unlink(self.filename)
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not name.isdigit():
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
        del self.processes[(name, utility_mode)]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
            # If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "U":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
(command_part, dummy, comment) = line.partition("--")
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line):
command += command_part
tinctest.logger.info("Processing command: %s" %command)
self.process_command(command, output_file)
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
# Skipping loading for this model class. Otherwise, by default, whenever this class is imported in sub-classes,
# unittest will load tests for this class as well. If there are sql files in the same folder as the model class,
# the loading mechanism of SQLTestCase will try to construct tests for those sqls which is not intended here.
# uao_udf.sql in the current directory is not a test sql and this will prevent constructing a test for that sql.
@tinctest.skipLoading("Model class. This annotation will prevent this class from loading tests when imported in sub-classes")
class SQLIsolationTestCase(SQLTestCase):
"""
    The isolation test case allows fine-grained control of interleaved
    transaction execution. This is mainly used to test isolation behavior.
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify a flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
"""
def get_ans_suffix(self):
""" The method can be overwritten by a subclass to customize
the ans file behavior.
"""
return
def get_output_substitution(self):
""" The method can be overwritten by a subclass to return a list
of regular expression substitutions the output file should
        be transformed with.
        This method can be used to normalize an output file by
        removing output that always differs, e.g., an oid in an
        error message.
"""
return
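    # Hedged example (editor's addition, illustrative only): a subclass can
    # override get_output_substitution() to normalize volatile output before
    # the diff, returning a list of (pattern, replacement) pairs, e.g.
    #
    #     def get_output_substitution(self):
    #         return [(r"oid \d+", "oid <oid>")]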
def _transform_output_file(self, output_file):
"""
Transforms the output file based on the output
substitutions provided by the subclass.
The transformations are cached and pre-compiled to
reduce the overhead.
"""
if "_output_transforms" not in dir(self):
self._output_transforms = self.get_output_substitution()
if self._output_transforms != None:
self._output_transforms = [(re.compile(t[0]), t[1]) for t in self._output_transforms]
if self._output_transforms == None or len(self._output_transforms) == 0:
return
contents = ''
with open(output_file, 'r') as f:
contents += f.read()
with open(output_file, "w") as f:
            for line in contents.splitlines():
                for (p, r) in self._output_transforms:
                    line = p.sub(r, line)
                print >>f, line
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
def verify_out_file(self, out_file, ans_file):
"""
The ans file might be replaced by a customized ans
file.
"""
def check_valid_suffix(suffix):
if not re.match("[a-zA-Z0-9]+", suffix):
raise Exception("Invalid ans file suffix %s" % suffix)
# Modify the ans file based on the suffix
suffix = self.get_ans_suffix()
if suffix:
check_valid_suffix(suffix)
new_ans_file = ans_file[:-4] + "_" + suffix + ".ans"
if os.path.exists(new_ans_file):
tinctest.logger.debug("Using customized ans file %s for this test" %new_ans_file)
ans_file = new_ans_file
if ans_file is not None:
self._transform_output_file(out_file)
self.test_artifacts.append(ans_file)
# Check if an init file exists in the same location as the sql file
init_files = []
init_file_path = os.path.join(self.get_sql_dir(), 'init_file')
if os.path.exists(init_file_path):
init_files.append(init_file_path)
result = Gpdiff.are_files_equal(out_file, ans_file, match_sub = init_files)
if result == False:
self.test_artifacts.append(out_file.replace('.out', '.diff'))
return result
if __name__ == "__main__":
executor = SQLIsolationExecutor()
executor.process_isolation_file(sys.stdin, sys.stdout)
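# Hedged usage sketch (editor's addition; file names are assumptions). An
# isolation file like the one below, mirroring the blocking example in the
# class docstring, can be piped through this module, which writes the
# interleaved session output to stdout:
#
#     1: BEGIN;
#     2: BEGIN;
#     1: DELETE FROM foo WHERE a = 4;
#     2&: DELETE FROM foo WHERE a = 4;
#     1: COMMIT;
#     2<:
#     2: COMMIT;
#
#     $ python sql_isolation_testcase.py < concurrent_delete.sql > concurrent_delete.out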
|
session.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import copy
import json
import logging
import os
import random
import sys
import threading
import time
import warnings
from queue import Empty as EmptyQueue
try:
from kubernetes import client as kube_client
from kubernetes import config as kube_config
except ImportError:
kube_client = None
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import GRPCError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.errors import LearningEngineInternalError
from graphscope.framework.errors import check_argument
from graphscope.framework.graph import Graph
from graphscope.framework.operation import Operation
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
DEFAULT_CONFIG_FILE = os.environ.get(
"GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
_session_dict = {}
logger = logging.getLogger("graphscope")
class Session(object):
"""A class for interacting with GraphScope graph computation service cluster.
A :class:`Session` object encapsulates the environment in which :class:`Operation`
objects are executed/evaluated.
A session may own resources. It is important to release these resources when
they are no longer required. To do this, invoke the :meth:`close` method
on the session.
A Session can register itself as default session with :meth:`as_default`, and all operations
    after that will use the default session. The session deregisters itself as the default session
when closed.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> # use session object explicitly
>>> sess = gs.session()
>>> g = sess.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
        >>> r = gs.sssp(pg, 4)
        >>> sess.close()
>>> # or use a session as default
>>> s = gs.session().as_default()
        >>> g = gs.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(pg, 4)
>>> s.close()
    We support setting up a service cluster and creating an RPC session in the following ways:
- GraphScope graph computation service run in cluster managed by kubernetes.
>>> s = graphscope.session()
    Also, :class:`Session` provides several keyword params for users to define the cluster.
    You may use the param :code:`k8s_gs_image` to specify the image for all engine pods, and
    the params :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. You
    can find details on all params in the :meth:`__init__` method.
>>> s = graphscope.session(
... k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
... k8s_vineyard_cpu=0.1,
... k8s_vineyard_mem="256Mi",
... vineyard_shared_mem="4Gi",
... k8s_engine_cpu=0.1,
... k8s_engine_mem="256Mi")
- or all params can be provided by a json configuration file or configuration dict.
>>> s = graphscope.session(config='/tmp/config.json')
>>> # Or
>>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
"""
@set_defaults(gs_config)
def __init__(
self,
config=None,
cluster_type=gs_config.cluster_type,
addr=gs_config.addr,
num_workers=gs_config.num_workers,
preemptive=gs_config.preemptive,
k8s_namespace=gs_config.k8s_namespace,
k8s_service_type=gs_config.k8s_service_type,
k8s_gs_image=gs_config.k8s_gs_image,
k8s_etcd_image=gs_config.k8s_etcd_image,
k8s_gie_graph_manager_image=gs_config.k8s_gie_graph_manager_image,
k8s_zookeeper_image=gs_config.k8s_zookeeper_image,
k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
k8s_etcd_mem=gs_config.k8s_etcd_mem,
k8s_zookeeper_cpu=gs_config.k8s_zookeeper_cpu,
k8s_zookeeper_mem=gs_config.k8s_zookeeper_mem,
k8s_gie_graph_manager_cpu=gs_config.k8s_gie_graph_manager_cpu,
k8s_gie_graph_manager_mem=gs_config.k8s_gie_graph_manager_mem,
k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
vineyard_shared_mem=gs_config.vineyard_shared_mem,
k8s_engine_cpu=gs_config.k8s_engine_cpu,
k8s_engine_mem=gs_config.k8s_engine_mem,
k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
k8s_mars_worker_mem=gs_config.mars_worker_mem,
k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
k8s_volumes=gs_config.k8s_volumes,
k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
timeout_seconds=gs_config.timeout_seconds,
dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
with_mars=gs_config.with_mars,
**kw
):
"""Construct a new GraphScope session.
Args:
config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For a str, it is treated as a path and the configuration file is read to build a
                session if the file exists. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which takes its value from GS_CONFIG_PATH
                in the environment. Note that it will overwrite explicit parameters. Defaults to None.
addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
A new session id will be generated for each session connection.
cluster_type (str, optional): Deploy GraphScope instance on hosts or k8s cluster. Defaults to k8s.
Available options: "k8s" and "hosts". Note that only support deployed on localhost with hosts mode.
num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
                as limits and provide the minimum available value as requests, but this will give the pod a `Burstable` QoS,
                which can be preempted by other pods with a higher QoS. Otherwise, it will set both requests and limits to the
same value.
            k8s_namespace (str, optional): The namespace in which to create all resources.
                If not provided, it will try to read the namespace from the kubernetes context,
                or a random namespace will be created (and later deleted) if none exists.
                Defaults to None.
k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which is used by vineyard.
k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
k8s_gie_graph_manager_image (str, optional): The GraphScope interactive engine's graph manager image.
            k8s_zookeeper_image (str, optional): The image of zookeeper, which is used by the GIE graph manager.
            k8s_vineyard_daemonset (str, optional): The name of the vineyard Helm deployment to use. GraphScope will try to
                discover the daemonset in the kubernetes cluster and use it if it exists, falling back to launching
                a bundled vineyard container otherwise.
k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
k8s_zookeeper_cpu (float, optional):
Minimum number of CPU cores request for zookeeper container. Defaults to 0.5.
k8s_zookeeper_mem (str, optional):
Minimum number of memory request for zookeeper container. Defaults to '256Mi'.
k8s_gie_graph_manager_cpu (float, optional):
Minimum number of CPU cores request for graphmanager container. Defaults to 1.0.
k8s_gie_graph_manager_mem (str, optional):
Minimum number of memory request for graphmanager container. Defaults to '4Gi'.
k8s_mars_worker_cpu (float, optional):
Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
k8s_mars_worker_mem (str, optional):
Minimum number of memory request for mars worker container. Defaults to '4Gi'.
k8s_mars_scheduler_cpu (float, optional):
Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
k8s_mars_scheduler_mem (str, optional):
Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
with_mars (bool, optional):
Launch graphscope with mars. Defaults to False.
k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
containers in a pod. Defaults to {}.
For example, you can mount host path with:
k8s_volumes = {
"my-data": {
"type": "hostPath",
"field": {
"path": "<path>",
"type": "Directory"
},
"mounts": [
{
"mountPath": "<path1>"
},
{
"mountPath": "<path2>"
}
]
}
}
Or you can mount PVC with:
k8s_volumes = {
"my-data": {
"type": "persistentVolumeClaim",
"field": {
"claimName": "your-pvc-name"
},
"mounts": [
{
"mountPath": "<path1>"
}
]
}
}
Also, you can mount a single volume with:
k8s_volumes = {
"my-data": {
"type": "hostPath",
"field": {xxx},
"mounts": {
"mountPath": "<path1>"
}
}
}
timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
k8s_waiting_for_delete is True).
            dangling_timeout_seconds (int, optional): After this many seconds of client disconnect,
                the coordinator will kill this graphscope instance. Defaults to 600.
                Expect this value to be greater than 5 (the heartbeat interval).
                Set it to -1 to disable the dangling check.
k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
**kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
- k8s_minikube_vm_driver: Deprecated.
- k8s_client_config (dict, optional):
Provide configurable parameters for connecting to remote k8s,
which strongly relies on the `kube_config.new_client_from_config` function.
eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
config_file: Name of the kube-config file.
                    context: Set the active context. If set to None, current_context from the config file will be used.
                    persist_config: If True, the config file will be updated when changed (e.g. a GCP token refresh).
- log_level: Deprecated.
                This param has moved to the global configuration. Set it via `graphscope.set_option(log_level='DEBUG')`
            - show_log: Deprecated.
                This param has moved to the global configuration. Set it via `graphscope.set_option(show_log=True)`
- k8s_vineyard_shared_mem: Deprecated.
Please use vineyard_shared_mem instead.
Raises:
TypeError: If the given argument combination is invalid and cannot be used to create
a GraphScope session.
"""
num_workers = int(num_workers)
self._config_params = {}
self._accessable_params = (
"addr",
"cluster_type",
"num_workers",
"preemptive",
"k8s_namespace",
"k8s_service_type",
"k8s_gs_image",
"k8s_etcd_image",
"k8s_image_pull_policy",
"k8s_image_pull_secrets",
"k8s_gie_graph_manager_image",
"k8s_zookeeper_image",
"k8s_coordinator_cpu",
"k8s_coordinator_mem",
"k8s_etcd_num_pods",
"k8s_etcd_cpu",
"k8s_etcd_mem",
"k8s_zookeeper_cpu",
"k8s_zookeeper_mem",
"k8s_gie_graph_manager_cpu",
"k8s_gie_graph_manager_mem",
"k8s_vineyard_daemonset",
"k8s_vineyard_cpu",
"k8s_vineyard_mem",
"vineyard_shared_mem",
"k8s_engine_cpu",
"k8s_engine_mem",
"k8s_mars_worker_cpu",
"k8s_mars_worker_mem",
"k8s_mars_scheduler_cpu",
"k8s_mars_scheduler_mem",
"with_mars",
"k8s_volumes",
"k8s_waiting_for_delete",
"timeout_seconds",
"dangling_timeout_seconds",
)
saved_locals = locals()
for param in self._accessable_params:
self._config_params[param] = saved_locals[param]
# parse config, which should be a path to config file, or dict
# config has highest priority
if isinstance(config, dict):
self._config_params.update(config)
elif isinstance(config, str):
self._load_config(config, False)
elif DEFAULT_CONFIG_FILE:
self._load_config(DEFAULT_CONFIG_FILE)
# update other optional params
self._config_params.update(kw)
# initial setting of cluster_type
self._cluster_type = self._parse_cluster_type()
# mars cannot work with run-on-local mode
if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
raise NotImplementedError(
"Mars cluster cannot be launched along with local GraphScope deployment"
)
# deprecated params handle
if "show_log" in kw:
warnings.warn(
"The `show_log` parameter has been deprecated and has no effect, "
"please use `graphscope.set_option(show_log=%s)` instead."
% kw.pop("show_log", None),
category=DeprecationWarning,
)
if "log_level" in kw:
warnings.warn(
"The `log_level` parameter has been deprecated and has no effect, "
"please use `graphscope.set_option(log_level=%r)` instead."
% kw.pop("show_log", None),
category=DeprecationWarning,
)
if "k8s_vineyard_shared_mem" in kw:
warnings.warn(
"The `k8s_vineyard_shared_mem` has benn deprecated and has no effect, "
"please use `vineyard_shared_mem` instead."
% kw.pop("k8s_vineyard_shared_mem", None),
category=DeprecationWarning,
)
# update k8s_client_config params
self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
# There should be no more custom keyword arguments.
if kw:
raise ValueError("Not recognized value: ", list(kw.keys()))
if self._config_params["addr"]:
logger.info(
"Connecting graphscope session with address: %s",
self._config_params["addr"],
)
else:
logger.info(
"Initializing graphscope session with parameters: %s",
self._config_params,
)
self._closed = False
# coordinator service endpoint
self._coordinator_endpoint = None
self._launcher = None
self._heartbeat_sending_thread = None
self._grpc_client = None
self._session_id = None # unique identifier across sessions
# engine config:
#
# {
# "experiment": "ON/OFF",
# "vineyard_socket": "...",
# "vineyard_rpc_endpoint": "..."
# }
self._engine_config = None
# interactive instance related graph map
self._interactive_instance_dict = {}
# learning engine related graph map
self._learning_instance_dict = {}
self._default_session = None
atexit.register(self.close)
# create and connect session
with CaptureKeyboardInterrupt(self.close):
self._connect()
self._disconnected = False
# heartbeat
self._heartbeat_interval_seconds = 5
self._heartbeat_sending_thread = threading.Thread(
target=self._send_heartbeat, args=()
)
self._heartbeat_sending_thread.daemon = True
self._heartbeat_sending_thread.start()
def __repr__(self):
return str(self.info)
def __str__(self):
return repr(self)
@property
def session_id(self):
return self._session_id
    def _load_config(self, path, silent=True):
config_path = os.path.expandvars(os.path.expanduser(path))
try:
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
except Exception as exp: # noqa
            if not silent:
raise exp
def _parse_cluster_type(self):
if self._config_params["addr"] is not None:
# get the cluster type after connecting
return types_pb2.UNDEFINED
else:
if self._config_params["cluster_type"] == "hosts":
self._run_on_local()
return types_pb2.HOSTS
elif self._config_params["cluster_type"] == "k8s":
return types_pb2.K8S
else:
raise ValueError("Expect hosts or k8s of cluster_type parameter")
@property
def engine_config(self):
"""Show the engine configration associated with session in json format."""
return self._engine_config
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None or self._disconnected:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._cluster_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["type"] = "hosts"
info["engine_hosts"] = self._engine_config["engine_hosts"]
info["cluster_type"] = str(self._cluster_type)
info["session_id"] = self.session_id
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._coordinator_endpoint
info["engine_config"] = self._engine_config
return info
def _send_heartbeat(self):
while not self._closed:
if self._grpc_client:
try:
self._grpc_client.send_heartbeat()
except GRPCError as exc:
logger.warning(exc)
self._disconnected = True
else:
self._disconnected = False
time.sleep(self._heartbeat_interval_seconds)
def close(self):
"""Closes this session.
This method frees all resources associated with the session.
"""
if self._closed:
return
self._closed = True
self._coordinator_endpoint = None
self._deregister_default()
if self._heartbeat_sending_thread:
self._heartbeat_sending_thread.join(
timeout=self._heartbeat_interval_seconds
)
self._heartbeat_sending_thread = None
self._disconnected = True
# close all interactive instances
for instance in self._interactive_instance_dict.values():
try:
if instance is not None:
instance.close()
except InteractiveEngineInternalError:
pass
self._interactive_instance_dict.clear()
# close all learning instances
for instance in self._learning_instance_dict.values():
try:
if instance is not None:
instance.close()
except LearningEngineInternalError:
pass
self._learning_instance_dict.clear()
if self._grpc_client:
self._grpc_client.close()
self._grpc_client = None
_session_dict.pop(self._session_id, None)
# clean up
if self._config_params["addr"] is None:
if self._launcher:
self._launcher.stop()
self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self._grpc_client:
self._grpc_client.close_interactive_engine(instance.object_id)
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self._grpc_client:
self._grpc_client.close_learning_engine(instance.object_id)
self._learning_instance_dict[instance.object_id] = None
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
def as_default(self):
"""Obtain a context manager that make this object as default session.
This method is used when a Session is constructed, which will immediately
install self as a default session.
Raises:
ValueError: If default session exist in current context.
Returns:
A context manager using this session as the default session.
"""
if not _default_session_stack.is_cleared():
raise ValueError(
"A default session is already active. You must explicitly call Session.close()."
)
# session context manager
self._default_session = default_session(self)
self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
def run(self, fetch):
"""Run operations of `fetch`.
Args:
fetch: :class:`Operation`
Raises:
RuntimeError:
                The client has disconnected from the service, or the session is closed.
            ValueError:
                If fetch is not an instance of :class:`Operation`, or
                the fetch has already been evaluated.
            InvalidArgumentError:
                The output type is not recognized.
Returns:
Different values for different output types of :class:`Operation`
"""
# prepare names to run and fetch
if hasattr(fetch, "op"):
fetch = fetch.op
if not isinstance(fetch, Operation):
raise ValueError("Expect a `Operation`")
if fetch.output is not None:
raise ValueError("The op <%s> are evaluated duplicated." % fetch.key)
# convert to list to be compatible with rpc client method signature
fetch_ops = [fetch]
dag = op_def_pb2.DagDef()
for op in fetch_ops:
dag.op.extend([copy.deepcopy(op.as_op_def())])
if self._closed:
raise RuntimeError("Attempted to use a closed Session.")
if not self._grpc_client:
raise RuntimeError("Session disconnected.")
# execute the query
try:
response = self._grpc_client.run(dag)
except FatalError:
self.close()
raise
check_argument(
len(fetch_ops) == 1, "Cannot execute multiple ops at the same time"
)
return self._parse_value(fetch_ops[0], response)
def _parse_value(self, op, response: message_pb2.RunStepResponse):
# attach an output to op, indicating the op is already run.
op.set_output(response.metrics)
        # if it loads an arrow property graph, it will return {'object_id': xxxx}
if op.output_types == types_pb2.GRAPH:
return response.graph_def
if op.output_types == types_pb2.APP:
return response.result.decode("utf-8")
if op.output_types in (
types_pb2.RESULTS,
types_pb2.VINEYARD_TENSOR,
types_pb2.VINEYARD_DATAFRAME,
):
return response.result.decode("utf-8")
if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
return response.result
else:
raise InvalidArgumentError(
"Not recognized output type: %s" % op.output_types
)
def _connect(self):
if self._config_params["addr"] is not None:
            # try to connect to an existing coordinator
self._coordinator_endpoint = self._config_params["addr"]
elif self._cluster_type == types_pb2.K8S:
if (
self._config_params["k8s_etcd_image"] is None
or self._config_params["k8s_gs_image"] is None
):
raise K8sError("None image found.")
if isinstance(
self._config_params["k8s_client_config"],
kube_client.api_client.ApiClient,
):
api_client = self._config_params["k8s_client_config"]
else:
api_client = kube_config.new_client_from_config(
**self._config_params["k8s_client_config"]
)
self._launcher = KubernetesClusterLauncher(
api_client=api_client,
namespace=self._config_params["k8s_namespace"],
service_type=self._config_params["k8s_service_type"],
num_workers=self._config_params["num_workers"],
gs_image=self._config_params["k8s_gs_image"],
preemptive=self._config_params["preemptive"],
etcd_image=self._config_params["k8s_etcd_image"],
gie_graph_manager_image=self._config_params[
"k8s_gie_graph_manager_image"
],
zookeeper_image=self._config_params["k8s_zookeeper_image"],
image_pull_policy=self._config_params["k8s_image_pull_policy"],
image_pull_secrets=self._config_params["k8s_image_pull_secrets"],
vineyard_daemonset=self._config_params["k8s_vineyard_daemonset"],
vineyard_cpu=self._config_params["k8s_vineyard_cpu"],
vineyard_mem=self._config_params["k8s_vineyard_mem"],
vineyard_shared_mem=self._config_params["vineyard_shared_mem"],
etcd_num_pods=self._config_params["k8s_etcd_num_pods"],
etcd_cpu=self._config_params["k8s_etcd_cpu"],
etcd_mem=self._config_params["k8s_etcd_mem"],
zookeeper_cpu=self._config_params["k8s_zookeeper_cpu"],
zookeeper_mem=self._config_params["k8s_zookeeper_mem"],
gie_graph_manager_cpu=self._config_params["k8s_gie_graph_manager_cpu"],
gie_graph_manager_mem=self._config_params["k8s_gie_graph_manager_mem"],
engine_cpu=self._config_params["k8s_engine_cpu"],
engine_mem=self._config_params["k8s_engine_mem"],
mars_worker_cpu=self._config_params["k8s_mars_worker_cpu"],
mars_worker_mem=self._config_params["k8s_mars_worker_mem"],
mars_scheduler_cpu=self._config_params["k8s_mars_scheduler_cpu"],
mars_scheduler_mem=self._config_params["k8s_mars_scheduler_mem"],
with_mars=self._config_params["with_mars"],
coordinator_cpu=float(self._config_params["k8s_coordinator_cpu"]),
coordinator_mem=self._config_params["k8s_coordinator_mem"],
volumes=self._config_params["k8s_volumes"],
waiting_for_delete=self._config_params["k8s_waiting_for_delete"],
timeout_seconds=self._config_params["timeout_seconds"],
dangling_timeout_seconds=self._config_params[
"dangling_timeout_seconds"
],
)
elif (
self._cluster_type == types_pb2.HOSTS
and isinstance(self._config_params["hosts"], list)
and len(self._config_params["hosts"]) != 0
and self._config_params["num_workers"] > 0
):
            # launch coordinator with hosts
self._launcher = HostsClusterLauncher(
hosts=self._config_params["hosts"],
port=self._config_params["port"],
num_workers=self._config_params["num_workers"],
vineyard_socket=self._config_params["vineyard_socket"],
timeout_seconds=self._config_params["timeout_seconds"],
vineyard_shared_mem=self._config_params["vineyard_shared_mem"],
)
else:
raise RuntimeError("Session initialize failed.")
# launching graphscope service
if self._launcher is not None:
self._launcher.start()
self._coordinator_endpoint = self._launcher.coordinator_endpoint
        # wait for the service to be ready
self._grpc_client = GRPCClient(self._coordinator_endpoint)
self._grpc_client.waiting_service_ready(
timeout_seconds=self._config_params["timeout_seconds"],
)
# connect and fetch logs from rpc server
try:
(
self._session_id,
self._cluster_type,
self._engine_config,
self._pod_name_list,
self._config_params["num_workers"],
self._config_params["k8s_namespace"],
) = self._grpc_client.connect(
cleanup_instance=not bool(self._config_params["addr"]),
dangling_timeout_seconds=self._config_params[
"dangling_timeout_seconds"
],
)
# fetch logs
if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
self._grpc_client.fetch_logs()
_session_dict[self._session_id] = self
except Exception:
self.close()
raise
def get_config(self):
"""Get configuration of the session."""
return self._config_params
def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
return Graph(self, incoming_data, oid_type, directed, generate_eid)
def load_from(self, *args, **kwargs):
"""Load a graph within the session.
See more information in :meth:`graphscope.load_from`.
"""
with default_session(self):
return graphscope.load_from(*args, **kwargs)
def _run_on_local(self):
self._config_params["hosts"] = ["localhost"]
self._config_params["port"] = None
self._config_params["vineyard_socket"] = ""
def _get_gl_handle(self, graph):
"""Dump a handler for GraphLearn for interaction.
Fields in :code:`schema` are:
+ the name of node type or edge type
+ whether the graph is weighted graph
+ whether the graph is labeled graph
+ the number of int attributes
+ the number of float attributes
+ the number of string attributes
An example of the graph handle:
.. code:: python
{
"server": "127.0.0.1:8888,127.0.0.1:8889",
"client_count": 1,
"vineyard_socket": "/var/run/vineyard.sock",
"vineyard_id": 13278328736,
"node_schema": [
"user:false:false:10:0:0",
"item:true:false:0:0:5"
],
"edge_schema": [
"user:click:item:true:false:0:0:0",
"user:buy:item:true:true:0:0:0",
"item:similar:item:false:false:10:0:0"
],
"node_attribute_types": {
"person": {
"age": "i",
"name": "s",
},
},
"edge_attribute_types": {
"knows": {
"weight": "f",
},
},
}
The handle can be decoded using:
.. code:: python
base64.b64decode(handle.encode('ascii')).decode('ascii')
Note that the ports are selected from a range :code:`(8000, 9000)`.
Args:
graph (:class:`Graph`): A Property Graph.
client_number (int): Number of client.
Returns:
str: Base64 encoded handle
Raises:
InvalidArgumentError: If the graph is not loaded, or graph_type isn't
`ARROW_PROPERTY`.
"""
if not graph.loaded():
raise InvalidArgumentError("The graph has already been unloaded")
if not graph.graph_type == types_pb2.ARROW_PROPERTY:
raise InvalidArgumentError("The graph should be a property graph.")
def group_property_types(props):
weighted, labeled, i, f, s, attr_types = "false", "false", 0, 0, 0, {}
for prop in props:
if prop.type in [types_pb2.STRING]:
s += 1
attr_types[prop.name] = "s"
elif prop.type in (types_pb2.FLOAT, types_pb2.DOUBLE):
f += 1
attr_types[prop.name] = "f"
else:
i += 1
attr_types[prop.name] = "i"
if prop.name == "weight":
weighted = "true"
elif prop.name == "label":
labeled = "true"
return weighted, labeled, i, f, s, attr_types
node_schema, node_attribute_types = [], dict()
for label in graph.schema.vertex_labels:
weighted, labeled, i, f, s, attr_types = group_property_types(
graph.schema.get_vertex_properties(label)
)
node_schema.append(
"{}:{}:{}:{}:{}:{}".format(label, weighted, labeled, i, f, s)
)
node_attribute_types[label] = attr_types
edge_schema, edge_attribute_types = [], dict()
for label in graph.schema.edge_labels:
weighted, labeled, i, f, s, attr_types = group_property_types(
graph.schema.get_edge_properties(label)
)
for rel in graph.schema.get_relationships(label):
edge_schema.append(
"{}:{}:{}:{}:{}:{}:{}:{}".format(
rel[0], label, rel[1], weighted, labeled, i, f, s
)
)
edge_attribute_types[label] = attr_types
handle = {
"hosts": self.info["engine_hosts"],
"client_count": 1,
"vineyard_id": graph.vineyard_id,
"vineyard_socket": self._engine_config["vineyard_socket"],
"node_schema": node_schema,
"edge_schema": edge_schema,
"node_attribute_types": node_attribute_types,
"edge_attribute_types": edge_attribute_types,
}
handle_json_string = json.dumps(handle)
return base64.b64encode(handle_json_string.encode("utf-8")).decode("utf-8")
@set_defaults(gs_config)
def gremlin(self, graph, engine_params=None):
"""Get a interactive engine handler to execute gremlin queries.
        Note that this method will be executed implicitly when a property graph is created,
        caching an instance of InteractiveQuery in the session, if `initializing_interactive_engine`
        is True. If you want to create a new instance for the same graph with different params,
        you should close the existing instance first.
.. code:: python
>>> # close and recreate InteractiveQuery.
>>> interactive_query = sess.gremlin(g)
>>> interactive_query.close()
>>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})
Args:
graph (:class:`Graph`): Use the graph to create interactive instance.
engine_params (dict, optional): Configure startup parameters of interactive engine.
You can also configure this param by `graphscope.set_option(engine_params={})`.
See a list of configurable keys in
`interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`
Raises:
InvalidArgumentError: :code:`graph` is not a property graph or unloaded.
Returns:
:class:`InteractiveQuery`
"""
# self._interactive_instance_dict[graph.vineyard_id] will be None if
# InteractiveQuery closed
if (
graph.vineyard_id in self._interactive_instance_dict
and self._interactive_instance_dict[graph.vineyard_id] is not None
):
interactive_query = self._interactive_instance_dict[graph.vineyard_id]
if interactive_query.status == InteractiveQueryStatus.Running:
return interactive_query
elif interactive_query.status == InteractiveQueryStatus.Failed:
raise InteractiveEngineInternalError(interactive_query.error_msg)
else:
# Initializing.
# while True is ok, as the status is either running or failed eventually after timeout.
while True:
time.sleep(1)
if interactive_query.status == InteractiveQueryStatus.Running:
return interactive_query
elif interactive_query.status == InteractiveQueryStatus.Failed:
raise InteractiveEngineInternalError(
interactive_query.error_msg
)
if not graph.loaded():
raise InvalidArgumentError("The graph has already been unloaded")
if not graph.graph_type == types_pb2.ARROW_PROPERTY:
raise InvalidArgumentError("The graph should be a property graph.")
interactive_query = InteractiveQuery(session=self, object_id=graph.vineyard_id)
self._interactive_instance_dict[graph.vineyard_id] = interactive_query
if engine_params is not None:
engine_params = {
str(key): str(value) for key, value in engine_params.items()
}
else:
engine_params = {}
try:
response = self._grpc_client.create_interactive_engine(
object_id=graph.vineyard_id,
schema_path=graph.schema_path,
gremlin_server_cpu=gs_config.k8s_gie_gremlin_server_cpu,
gremlin_server_mem=gs_config.k8s_gie_gremlin_server_mem,
engine_params=engine_params,
)
except Exception as e:
interactive_query.status = InteractiveQueryStatus.Failed
interactive_query.error_msg = str(e)
raise InteractiveEngineInternalError(str(e)) from e
else:
interactive_query.set_frontend(
front_ip=response.frontend_host, front_port=response.frontend_port
)
interactive_query.status = InteractiveQueryStatus.Running
graph._attach_interactive_instance(interactive_query)
return interactive_query
def learning(self, graph, nodes=None, edges=None, gen_labels=None):
"""Start a graph learning engine.
Args:
nodes (list): The node types that will be used for gnn training.
edges (list): The edge types that will be used for gnn training.
gen_labels (list): Extra node and edge labels on original graph for gnn training.
Returns:
`graphscope.learning.Graph`: An instance of `graphscope.learning.Graph`
that could be feed to the learning engine.
"""
if (
graph.vineyard_id in self._learning_instance_dict
and self._learning_instance_dict[graph.vineyard_id] is not None
):
return self._learning_instance_dict[graph.vineyard_id]
if sys.platform != "linux" and sys.platform != "linux2":
raise RuntimeError(
"The learning engine currently supports Linux only, doesn't support %s"
% sys.platform
)
if not graph.loaded():
raise InvalidArgumentError("The graph has already been unloaded")
if not graph.graph_type == types_pb2.ARROW_PROPERTY:
raise InvalidArgumentError("The graph should be a property graph.")
from graphscope.learning.graph import Graph as LearningGraph
handle = self._get_gl_handle(graph)
config = LearningGraph.preprocess_args(handle, nodes, edges, gen_labels)
config = base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8")
endpoints = self._grpc_client.create_learning_engine(
graph.vineyard_id, handle, config
)
handle = json.loads(base64.b64decode(handle.encode("utf-8")).decode("utf-8"))
handle["server"] = endpoints
handle["client_count"] = 1
learning_graph = LearningGraph(handle, config, graph.vineyard_id, self)
self._learning_instance_dict[graph.vineyard_id] = learning_graph
graph._attach_learning_instance(learning_graph)
return learning_graph
session = Session
def set_option(**kwargs):
"""Set the value of specified options.
Available options:
- num_workers
- log_level
- show_log
- vineyard_shared_mem
- k8s_namespace
- k8s_service_type
- k8s_gs_image
- k8s_etcd_image
- k8s_gie_graph_manager_image
- k8s_zookeeper_image
- k8s_image_pull_policy
- k8s_image_pull_secrets
- k8s_coordinator_cpu
- k8s_coordinator_mem
- k8s_vineyard_daemonset
- k8s_vineyard_cpu
- k8s_vineyard_mem
- k8s_engine_cpu
- k8s_engine_mem
- k8s_mars_worker_cpu
- k8s_mars_worker_mem
- k8s_mars_scheduler_cpu
- k8s_mars_scheduler_mem
- with_mars
- k8s_waiting_for_delete
- engine_params
- initializing_interactive_engine
- timeout_seconds
Args:
kwargs: dict
kv pair of GraphScope config you want to set.
Raises:
ValueError: If no such option exists.
Returns: None
"""
# check exists
for k, v in kwargs.items():
if not hasattr(gs_config, k):
raise ValueError("No such option {} exists.".format(k))
for k, v in kwargs.items():
setattr(gs_config, k, v)
GSLogger.update()
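# Hedged example (editor's addition): options are set on the global GSConfig
# object before a session is created, and can be read back, e.g.
#
#     graphscope.set_option(show_log=True, timeout_seconds=1200)
#     graphscope.get_option("timeout_seconds")   # -> 1200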
def get_option(key):
"""Get the value of specified option.
Available options:
- num_workers
- log_level
- show_log
- vineyard_shared_mem
- k8s_namespace
- k8s_service_type
- k8s_gs_image
- k8s_etcd_image
- k8s_gie_graph_manager_image
- k8s_zookeeper_image
- k8s_image_pull_policy
- k8s_image_pull_secrets
- k8s_coordinator_cpu
- k8s_coordinator_mem
- k8s_vineyard_daemonset
- k8s_vineyard_cpu
- k8s_vineyard_mem
- k8s_engine_cpu
- k8s_engine_mem
- k8s_mars_worker_cpu
- k8s_mars_worker_mem
- k8s_mars_scheduler_cpu
- k8s_mars_scheduler_mem
- with_mars
- k8s_waiting_for_delete
- engine_params
- initializing_interactive_engine
- timeout_seconds
Args:
key: str
Key of GraphScope config you want to get.
Raises:
ValueError: If no such option exists.
Returns: result: the value of the option
"""
if hasattr(gs_config, key):
return getattr(gs_config, key)
else:
raise ValueError("No such option {} exists.".format(key))
def default_session(session):
"""Python's :code:`with` handler for defining a default session.
    This function provides a means of registering a session to handle
    code that needs a default session.
    Use the :code:`with` keyword to specify that invocations within
    the scope of a block should be executed by a particular session.
Args:
session: :class:`Session`
The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
def get_default_session():
"""Returns the default session for the current context.
Raises:
        RuntimeError: No default session exists.
Returns:
The default :class:`Session`.
"""
return _default_session_stack.get_default()
def get_session_by_id(handle):
"""Return the session by handle."""
if handle not in _session_dict:
raise ValueError("Session not exists.")
return _session_dict.get(handle)
class _DefaultSessionStack(object):
"""A stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self.stack = []
def get_default(self):
if not self.stack:
raise RuntimeError("No default session found.")
return self.stack[-1]
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
self.stack.remove(default)
_default_session_stack = _DefaultSessionStack() # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
return get_default_session().g(incoming_data, oid_type, directed, generate_eid)
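# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Demonstrates the default-session machinery defined above; the cluster
# parameters are assumptions for illustration only.
if __name__ == "__main__":
    sess = Session(cluster_type="hosts", num_workers=1)
    with default_session(sess):
        # g() resolves the default session via _default_session_stack.
        graph = g()
    sess.close()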
|